use 'x is None' instead of 'x == None'...
Martin Geisler
r8527:f9a80054 default
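For context on why this change matters: `==` dispatches to an object's `__eq__` method, so a sufficiently permissive `__eq__` can make a non-None object compare equal to None, whereas `is` tests identity against the None singleton and cannot be fooled (it is also marginally faster). A minimal illustration of the difference, not part of the changeset and using a hypothetical class:

class Anything(object):
    # hypothetical class whose __eq__ answers True for everything
    def __eq__(self, other):
        return True

x = Anything()
print x == None   # True: __eq__ is consulted and lies
print x is None   # False: identity test against the None singleton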
@@ -1,105 +1,105 @@
 #!/usr/bin/env python

 import os, sys, struct, stat
 import difflib
 import re
 from optparse import OptionParser
 from mercurial.bdiff import bdiff, blocks
 from mercurial.mdiff import bunidiff, diffopts

 VERSION="0.3"
 usage = "usage: %prog [options] file1 file2"
 parser = OptionParser(usage=usage)

 parser.add_option("-d", "--difflib", action="store_true", default=False)
 parser.add_option('-x', '--count', default=1)
 parser.add_option('-c', '--context', type="int", default=3)
 parser.add_option('-p', '--show-c-function', action="store_true", default=False)
 parser.add_option('-w', '--ignore-all-space', action="store_true",
                   default=False)

 (options, args) = parser.parse_args()

 if not args:
     parser.print_help()
     sys.exit(1)

 # simple utility function to put all the
 # files from a directory tree into a dict
 def buildlist(names, top):
     tlen = len(top)
     for root, dirs, files in os.walk(top):
         l = root[tlen + 1:]
         for x in files:
             p = os.path.join(root, x)
             st = os.lstat(p)
             if stat.S_ISREG(st.st_mode):
                 names[os.path.join(l, x)] = (st.st_dev, st.st_ino)

 def diff_files(file1, file2):
-    if file1 == None:
+    if file1 is None:
         b = file(file2).read().splitlines(1)
         l1 = "--- %s\n" % (file2)
         l2 = "+++ %s\n" % (file2)
         l3 = "@@ -0,0 +1,%d @@\n" % len(b)
         l = [l1, l2, l3] + ["+" + e for e in b]
-    elif file2 == None:
+    elif file2 is None:
         a = file(file1).read().splitlines(1)
         l1 = "--- %s\n" % (file1)
         l2 = "+++ %s\n" % (file1)
         l3 = "@@ -1,%d +0,0 @@\n" % len(a)
         l = [l1, l2, l3] + ["-" + e for e in a]
     else:
         t1 = file(file1).read()
         t2 = file(file2).read()
         l1 = t1.splitlines(1)
         l2 = t2.splitlines(1)
         if options.difflib:
             l = difflib.unified_diff(l1, l2, file1, file2)
         else:
             l = bunidiff(t1, t2, l1, l2, file1, file2,
                          diffopts(context=options.context,
                                   showfunc=options.show_c_function,
                                   ignorews=options.ignore_all_space))
     for x in l:
         if x[-1] != '\n':
             x += "\n\ No newline at end of file\n"
         print x,

 file1 = args[0]
 file2 = args[1]

 if os.path.isfile(file1) and os.path.isfile(file2):
     diff_files(file1, file2)
 elif os.path.isdir(file1):
     if not os.path.isdir(file2):
         sys.stderr.write("file types don't match\n")
         sys.exit(1)

     d1 = {}
     d2 = {}

     buildlist(d1, file1)
     buildlist(d2, file2)
     keys = d1.keys()
     keys.sort()
     for x in keys:
         if x not in d2:
             f2 = None
         else:
             f2 = os.path.join(file2, x)
             st1 = d1[x]
             st2 = d2[x]
             del d2[x]
             if st1[0] == st2[0] and st1[1] == st2[1]:
                 sys.stderr.write("%s is a hard link\n" % x)
                 continue
         x = os.path.join(file1, x)
         diff_files(x, f2)
     keys = d2.keys()
     keys.sort()
     for x in keys:
         f1 = None
         x = os.path.join(file2, x)
         diff_files(f1, x)
@@ -1,322 +1,322 @@
 # Mercurial extension to provide the 'hg bookmark' command
 #
 # Copyright 2008 David Soria Parra <dsp@php.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.

 '''mercurial bookmarks

 Mercurial bookmarks are local moveable pointers to changesets. Every
 bookmark points to a changeset identified by its hash. If you commit a
 changeset that is based on a changeset that has a bookmark on it, the
 bookmark is forwarded to the new changeset.

 It is possible to use bookmark names in every revision lookup (e.g. hg
 merge, hg update).

 The bookmark extension offers the possiblity to have a more git-like
 experience by adding the following configuration option to your .hgrc:

 [bookmarks]
 track.current = True

 This will cause bookmarks to track the bookmark that you are currently
 on, and just updates it. This is similar to git's approach of
 branching.
 '''

 from mercurial.i18n import _
 from mercurial.node import nullid, nullrev, hex, short
 from mercurial import util, commands, localrepo, repair, extensions
 import os

 def parse(repo):
     '''Parse .hg/bookmarks file and return a dictionary

     Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
     in the .hg/bookmarks file. They are read by the parse() method and
     returned as a dictionary with name => hash values.

     The parsed dictionary is cached until a write() operation is done.
     '''
     try:
         if repo._bookmarks:
             return repo._bookmarks
         repo._bookmarks = {}
         for line in repo.opener('bookmarks'):
             sha, refspec = line.strip().split(' ', 1)
             repo._bookmarks[refspec] = repo.lookup(sha)
     except:
         pass
     return repo._bookmarks

 def write(repo, refs):
     '''Write bookmarks

     Write the given bookmark => hash dictionary to the .hg/bookmarks file
     in a format equal to those of localtags.

     We also store a backup of the previous state in undo.bookmarks that
     can be copied back on rollback.
     '''
     if os.path.exists(repo.join('bookmarks')):
         util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
     if current(repo) not in refs:
         setcurrent(repo, None)
     file = repo.opener('bookmarks', 'w+')
     for refspec, node in refs.iteritems():
         file.write("%s %s\n" % (hex(node), refspec))
     file.close()

 def current(repo):
     '''Get the current bookmark

     If we use gittishsh branches we have a current bookmark that
     we are on. This function returns the name of the bookmark. It
     is stored in .hg/bookmarks.current
     '''
     if repo._bookmarkcurrent:
         return repo._bookmarkcurrent
     mark = None
     if os.path.exists(repo.join('bookmarks.current')):
         file = repo.opener('bookmarks.current')
         # No readline() in posixfile_nt, reading everything is cheap
         mark = (file.readlines() or [''])[0]
         if mark == '':
             mark = None
         file.close()
     repo._bookmarkcurrent = mark
     return mark

 def setcurrent(repo, mark):
     '''Set the name of the bookmark that we are currently on

     Set the name of the bookmark that we are on (hg update <bookmark>).
     The name is recorded in .hg/bookmarks.current
     '''
     if current(repo) == mark:
         return

     refs = parse(repo)

     # do not update if we do update to a rev equal to the current bookmark
     if (mark and mark not in refs and
         current(repo) and refs[current(repo)] == repo.changectx('.').node()):
         return
     if mark not in refs:
         mark = ''
     file = repo.opener('bookmarks.current', 'w+')
     file.write(mark)
     file.close()
     repo._bookmarkcurrent = mark

 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
     '''mercurial bookmarks

     Bookmarks are pointers to certain commits that move when
     commiting. Bookmarks are local. They can be renamed, copied and
     deleted. It is possible to use bookmark names in 'hg merge' and
     'hg update' to update to a given bookmark.

     You can use 'hg bookmark NAME' to set a bookmark on the current
     tip with the given name. If you specify a revision using -r REV
     (where REV may be an existing bookmark), the bookmark is set to
     that revision.
     '''
     hexfn = ui.debugflag and hex or short
     marks = parse(repo)
     cur = repo.changectx('.').node()

     if rename:
         if rename not in marks:
             raise util.Abort(_("a bookmark of this name does not exist"))
         if mark in marks and not force:
             raise util.Abort(_("a bookmark of the same name already exists"))
         if mark is None:
             raise util.Abort(_("new bookmark name required"))
         marks[mark] = marks[rename]
         del marks[rename]
         if current(repo) == rename:
             setcurrent(repo, mark)
         write(repo, marks)
         return

     if delete:
-        if mark == None:
+        if mark is None:
             raise util.Abort(_("bookmark name required"))
         if mark not in marks:
             raise util.Abort(_("a bookmark of this name does not exist"))
         if mark == current(repo):
             setcurrent(repo, None)
         del marks[mark]
         write(repo, marks)
         return

     if mark != None:
         if "\n" in mark:
             raise util.Abort(_("bookmark name cannot contain newlines"))
         mark = mark.strip()
         if mark in marks and not force:
             raise util.Abort(_("a bookmark of the same name already exists"))
         if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
             and not force):
             raise util.Abort(
                 _("a bookmark cannot have the name of an existing branch"))
         if rev:
             marks[mark] = repo.lookup(rev)
         else:
             marks[mark] = repo.changectx('.').node()
         setcurrent(repo, mark)
         write(repo, marks)
         return

-    if mark == None:
+    if mark is None:
         if rev:
             raise util.Abort(_("bookmark name required"))
         if len(marks) == 0:
             ui.status("no bookmarks set\n")
         else:
             for bmark, n in marks.iteritems():
                 if ui.configbool('bookmarks', 'track.current'):
                     prefix = (bmark == current(repo) and n == cur) and '*' or ' '
                 else:
                     prefix = (n == cur) and '*' or ' '

                 ui.write(" %s %-25s %d:%s\n" % (
                     prefix, bmark, repo.changelog.rev(n), hexfn(n)))
         return

 def _revstostrip(changelog, node):
     srev = changelog.rev(node)
     tostrip = [srev]
     saveheads = []
     for r in xrange(srev, len(changelog)):
         parents = changelog.parentrevs(r)
         if parents[0] in tostrip or parents[1] in tostrip:
             tostrip.append(r)
             if parents[1] != nullrev:
                 for p in parents:
                     if p not in tostrip and p > srev:
                         saveheads.append(p)
     return [r for r in tostrip if r not in saveheads]

 def strip(oldstrip, ui, repo, node, backup="all"):
     """Strip bookmarks if revisions are stripped using
     the mercurial.strip method. This usually happens during
     qpush and qpop"""
     revisions = _revstostrip(repo.changelog, node)
     marks = parse(repo)
     update = []
     for mark, n in marks.iteritems():
         if repo.changelog.rev(n) in revisions:
             update.append(mark)
     oldstrip(ui, repo, node, backup)
     if len(update) > 0:
         for m in update:
             marks[m] = repo.changectx('.').node()
         write(repo, marks)

 def reposetup(ui, repo):
     if not isinstance(repo, localrepo.localrepository):
         return

     # init a bookmark cache as otherwise we would get a infinite reading
     # in lookup()
     repo._bookmarks = None
     repo._bookmarkcurrent = None

     class bookmark_repo(repo.__class__):
         def rollback(self):
             if os.path.exists(self.join('undo.bookmarks')):
                 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
             return super(bookmark_repo, self).rollback()

         def lookup(self, key):
             if self._bookmarks is None:
                 self._bookmarks = parse(self)
             if key in self._bookmarks:
                 key = self._bookmarks[key]
             return super(bookmark_repo, self).lookup(key)

         def commit(self, *k, **kw):
             """Add a revision to the repository and
             move the bookmark"""
             node = super(bookmark_repo, self).commit(*k, **kw)
-            if node == None:
+            if node is None:
                 return None
             parents = repo.changelog.parents(node)
             if parents[1] == nullid:
                 parents = (parents[0],)
             marks = parse(repo)
             update = False
             for mark, n in marks.items():
                 if ui.configbool('bookmarks', 'track.current'):
                     if mark == current(repo) and n in parents:
                         marks[mark] = node
                         update = True
                 else:
                     if n in parents:
                         marks[mark] = node
                         update = True
             if update:
                 write(repo, marks)
             return node

         def addchangegroup(self, source, srctype, url, emptyok=False):
             parents = repo.dirstate.parents()

             result = super(bookmark_repo, self).addchangegroup(
                 source, srctype, url, emptyok)
             if result > 1:
                 # We have more heads than before
                 return result
             node = repo.changelog.tip()
             marks = parse(repo)
             update = False
             for mark, n in marks.items():
                 if n in parents:
                     marks[mark] = node
                     update = True
             if update:
                 write(repo, marks)
             return result

         def tags(self):
             """Merge bookmarks with normal tags"""
             if self.tagscache:
                 return self.tagscache

             tagscache = super(bookmark_repo, self).tags()
             tagscache.update(parse(repo))
             return tagscache

     repo.__class__ = bookmark_repo

 def uisetup(ui):
     extensions.wrapfunction(repair, "strip", strip)
     if ui.configbool('bookmarks', 'track.current'):
         extensions.wrapcommand(commands.table, 'update', updatecurbookmark)

 def updatecurbookmark(orig, ui, repo, *args, **opts):
     '''Set the current bookmark

     If the user updates to a bookmark we update the .hg/bookmarks.current
     file.
     '''
     res = orig(ui, repo, *args, **opts)
     rev = opts['rev']
     if not rev and len(args) > 0:
         rev = args[0]
     setcurrent(repo, rev)
     return res

 cmdtable = {
     "bookmarks":
         (bookmark,
          [('f', 'force', False, _('force')),
           ('r', 'rev', '', _('revision')),
           ('d', 'delete', False, _('delete a given bookmark')),
           ('m', 'rename', '', _('rename a given bookmark'))],
          _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
 }
@@ -1,2637 +1,2637 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial.lock import release
34 from mercurial.lock import release
35 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import commands, cmdutil, hg, patch, util
36 from mercurial import repair, extensions, url, error
36 from mercurial import repair, extensions, url, error
37 import os, sys, re, errno
37 import os, sys, re, errno
38
38
39 commands.norepo += " qclone"
39 commands.norepo += " qclone"
40
40
41 # Patch names looks like unix-file names.
41 # Patch names looks like unix-file names.
42 # They must be joinable with queue directory and result in the patch path.
42 # They must be joinable with queue directory and result in the patch path.
43 normname = util.normpath
43 normname = util.normpath
44
44
45 class statusentry:
45 class statusentry:
46 def __init__(self, rev, name=None):
46 def __init__(self, rev, name=None):
47 if not name:
47 if not name:
48 fields = rev.split(':', 1)
48 fields = rev.split(':', 1)
49 if len(fields) == 2:
49 if len(fields) == 2:
50 self.rev, self.name = fields
50 self.rev, self.name = fields
51 else:
51 else:
52 self.rev, self.name = None, None
52 self.rev, self.name = None, None
53 else:
53 else:
54 self.rev, self.name = rev, name
54 self.rev, self.name = rev, name
55
55
56 def __str__(self):
56 def __str__(self):
57 return self.rev + ':' + self.name
57 return self.rev + ':' + self.name
58
58
59 class patchheader(object):
59 class patchheader(object):
60 def __init__(self, message, comments, user, date, haspatch):
60 def __init__(self, message, comments, user, date, haspatch):
61 self.message = message
61 self.message = message
62 self.comments = comments
62 self.comments = comments
63 self.user = user
63 self.user = user
64 self.date = date
64 self.date = date
65 self.haspatch = haspatch
65 self.haspatch = haspatch
66
66
67 def setuser(self, user):
67 def setuser(self, user):
68 if not self.setheader(['From: ', '# User '], user):
68 if not self.setheader(['From: ', '# User '], user):
69 try:
69 try:
70 patchheaderat = self.comments.index('# HG changeset patch')
70 patchheaderat = self.comments.index('# HG changeset patch')
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
72 except ValueError:
72 except ValueError:
73 self.comments = ['From: ' + user, ''] + self.comments
73 self.comments = ['From: ' + user, ''] + self.comments
74 self.user = user
74 self.user = user
75
75
76 def setdate(self, date):
76 def setdate(self, date):
77 if self.setheader(['# Date '], date):
77 if self.setheader(['# Date '], date):
78 self.date = date
78 self.date = date
79
79
80 def setmessage(self, message):
80 def setmessage(self, message):
81 if self.comments:
81 if self.comments:
82 self._delmsg()
82 self._delmsg()
83 self.message = [message]
83 self.message = [message]
84 self.comments += self.message
84 self.comments += self.message
85
85
86 def setheader(self, prefixes, new):
86 def setheader(self, prefixes, new):
87 '''Update all references to a field in the patch header.
87 '''Update all references to a field in the patch header.
88 If none found, add it email style.'''
88 If none found, add it email style.'''
89 res = False
89 res = False
90 for prefix in prefixes:
90 for prefix in prefixes:
91 for i in xrange(len(self.comments)):
91 for i in xrange(len(self.comments)):
92 if self.comments[i].startswith(prefix):
92 if self.comments[i].startswith(prefix):
93 self.comments[i] = prefix + new
93 self.comments[i] = prefix + new
94 res = True
94 res = True
95 break
95 break
96 return res
96 return res
97
97
98 def __str__(self):
98 def __str__(self):
99 if not self.comments:
99 if not self.comments:
100 return ''
100 return ''
101 return '\n'.join(self.comments) + '\n\n'
101 return '\n'.join(self.comments) + '\n\n'
102
102
103 def _delmsg(self):
103 def _delmsg(self):
104 '''Remove existing message, keeping the rest of the comments fields.
104 '''Remove existing message, keeping the rest of the comments fields.
105 If comments contains 'subject: ', message will prepend
105 If comments contains 'subject: ', message will prepend
106 the field and a blank line.'''
106 the field and a blank line.'''
107 if self.message:
107 if self.message:
108 subj = 'subject: ' + self.message[0].lower()
108 subj = 'subject: ' + self.message[0].lower()
109 for i in xrange(len(self.comments)):
109 for i in xrange(len(self.comments)):
110 if subj == self.comments[i].lower():
110 if subj == self.comments[i].lower():
111 del self.comments[i]
111 del self.comments[i]
112 self.message = self.message[2:]
112 self.message = self.message[2:]
113 break
113 break
114 ci = 0
114 ci = 0
115 for mi in xrange(len(self.message)):
115 for mi in xrange(len(self.message)):
116 while self.message[mi] != self.comments[ci]:
116 while self.message[mi] != self.comments[ci]:
117 ci += 1
117 ci += 1
118 del self.comments[ci]
118 del self.comments[ci]
119
119
120 class queue:
120 class queue:
121 def __init__(self, ui, path, patchdir=None):
121 def __init__(self, ui, path, patchdir=None):
122 self.basepath = path
122 self.basepath = path
123 self.path = patchdir or os.path.join(path, "patches")
123 self.path = patchdir or os.path.join(path, "patches")
124 self.opener = util.opener(self.path)
124 self.opener = util.opener(self.path)
125 self.ui = ui
125 self.ui = ui
126 self.applied_dirty = 0
126 self.applied_dirty = 0
127 self.series_dirty = 0
127 self.series_dirty = 0
128 self.series_path = "series"
128 self.series_path = "series"
129 self.status_path = "status"
129 self.status_path = "status"
130 self.guards_path = "guards"
130 self.guards_path = "guards"
131 self.active_guards = None
131 self.active_guards = None
132 self.guards_dirty = False
132 self.guards_dirty = False
133 self._diffopts = None
133 self._diffopts = None
134
134
135 @util.propertycache
135 @util.propertycache
136 def applied(self):
136 def applied(self):
137 if os.path.exists(self.join(self.status_path)):
137 if os.path.exists(self.join(self.status_path)):
138 lines = self.opener(self.status_path).read().splitlines()
138 lines = self.opener(self.status_path).read().splitlines()
139 return [statusentry(l) for l in lines]
139 return [statusentry(l) for l in lines]
140 return []
140 return []
141
141
142 @util.propertycache
142 @util.propertycache
143 def full_series(self):
143 def full_series(self):
144 if os.path.exists(self.join(self.series_path)):
144 if os.path.exists(self.join(self.series_path)):
145 return self.opener(self.series_path).read().splitlines()
145 return self.opener(self.series_path).read().splitlines()
146 return []
146 return []
147
147
148 @util.propertycache
148 @util.propertycache
149 def series(self):
149 def series(self):
150 self.parse_series()
150 self.parse_series()
151 return self.series
151 return self.series
152
152
153 @util.propertycache
153 @util.propertycache
154 def series_guards(self):
154 def series_guards(self):
155 self.parse_series()
155 self.parse_series()
156 return self.series_guards
156 return self.series_guards
157
157
158 def invalidate(self):
158 def invalidate(self):
159 for a in 'applied full_series series series_guards'.split():
159 for a in 'applied full_series series series_guards'.split():
160 if a in self.__dict__:
160 if a in self.__dict__:
161 delattr(self, a)
161 delattr(self, a)
162 self.applied_dirty = 0
162 self.applied_dirty = 0
163 self.series_dirty = 0
163 self.series_dirty = 0
164 self.guards_dirty = False
164 self.guards_dirty = False
165 self.active_guards = None
165 self.active_guards = None
166
166
167 def diffopts(self):
167 def diffopts(self):
168 if self._diffopts is None:
168 if self._diffopts is None:
169 self._diffopts = patch.diffopts(self.ui)
169 self._diffopts = patch.diffopts(self.ui)
170 return self._diffopts
170 return self._diffopts
171
171
172 def join(self, *p):
172 def join(self, *p):
173 return os.path.join(self.path, *p)
173 return os.path.join(self.path, *p)
174
174
175 def find_series(self, patch):
175 def find_series(self, patch):
176 pre = re.compile("(\s*)([^#]+)")
176 pre = re.compile("(\s*)([^#]+)")
177 index = 0
177 index = 0
178 for l in self.full_series:
178 for l in self.full_series:
179 m = pre.match(l)
179 m = pre.match(l)
180 if m:
180 if m:
181 s = m.group(2)
181 s = m.group(2)
182 s = s.rstrip()
182 s = s.rstrip()
183 if s == patch:
183 if s == patch:
184 return index
184 return index
185 index += 1
185 index += 1
186 return None
186 return None
187
187
188 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
188 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
189
189
190 def parse_series(self):
190 def parse_series(self):
191 self.series = []
191 self.series = []
192 self.series_guards = []
192 self.series_guards = []
193 for l in self.full_series:
193 for l in self.full_series:
194 h = l.find('#')
194 h = l.find('#')
195 if h == -1:
195 if h == -1:
196 patch = l
196 patch = l
197 comment = ''
197 comment = ''
198 elif h == 0:
198 elif h == 0:
199 continue
199 continue
200 else:
200 else:
201 patch = l[:h]
201 patch = l[:h]
202 comment = l[h:]
202 comment = l[h:]
203 patch = patch.strip()
203 patch = patch.strip()
204 if patch:
204 if patch:
205 if patch in self.series:
205 if patch in self.series:
206 raise util.Abort(_('%s appears more than once in %s') %
206 raise util.Abort(_('%s appears more than once in %s') %
207 (patch, self.join(self.series_path)))
207 (patch, self.join(self.series_path)))
208 self.series.append(patch)
208 self.series.append(patch)
209 self.series_guards.append(self.guard_re.findall(comment))
209 self.series_guards.append(self.guard_re.findall(comment))
210
210
211 def check_guard(self, guard):
211 def check_guard(self, guard):
212 if not guard:
212 if not guard:
213 return _('guard cannot be an empty string')
213 return _('guard cannot be an empty string')
214 bad_chars = '# \t\r\n\f'
214 bad_chars = '# \t\r\n\f'
215 first = guard[0]
215 first = guard[0]
216 if first in '-+':
216 if first in '-+':
217 return (_('guard %r starts with invalid character: %r') %
217 return (_('guard %r starts with invalid character: %r') %
218 (guard, first))
218 (guard, first))
219 for c in bad_chars:
219 for c in bad_chars:
220 if c in guard:
220 if c in guard:
221 return _('invalid character in guard %r: %r') % (guard, c)
221 return _('invalid character in guard %r: %r') % (guard, c)
222
222
223 def set_active(self, guards):
223 def set_active(self, guards):
224 for guard in guards:
224 for guard in guards:
225 bad = self.check_guard(guard)
225 bad = self.check_guard(guard)
226 if bad:
226 if bad:
227 raise util.Abort(bad)
227 raise util.Abort(bad)
228 guards = sorted(set(guards))
228 guards = sorted(set(guards))
229 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
229 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
230 self.active_guards = guards
230 self.active_guards = guards
231 self.guards_dirty = True
231 self.guards_dirty = True
232
232
233 def active(self):
233 def active(self):
234 if self.active_guards is None:
234 if self.active_guards is None:
235 self.active_guards = []
235 self.active_guards = []
236 try:
236 try:
237 guards = self.opener(self.guards_path).read().split()
237 guards = self.opener(self.guards_path).read().split()
238 except IOError, err:
238 except IOError, err:
239 if err.errno != errno.ENOENT: raise
239 if err.errno != errno.ENOENT: raise
240 guards = []
240 guards = []
241 for i, guard in enumerate(guards):
241 for i, guard in enumerate(guards):
242 bad = self.check_guard(guard)
242 bad = self.check_guard(guard)
243 if bad:
243 if bad:
244 self.ui.warn('%s:%d: %s\n' %
244 self.ui.warn('%s:%d: %s\n' %
245 (self.join(self.guards_path), i + 1, bad))
245 (self.join(self.guards_path), i + 1, bad))
246 else:
246 else:
247 self.active_guards.append(guard)
247 self.active_guards.append(guard)
248 return self.active_guards
248 return self.active_guards
249
249
250 def set_guards(self, idx, guards):
250 def set_guards(self, idx, guards):
251 for g in guards:
251 for g in guards:
252 if len(g) < 2:
252 if len(g) < 2:
253 raise util.Abort(_('guard %r too short') % g)
253 raise util.Abort(_('guard %r too short') % g)
254 if g[0] not in '-+':
254 if g[0] not in '-+':
255 raise util.Abort(_('guard %r starts with invalid char') % g)
255 raise util.Abort(_('guard %r starts with invalid char') % g)
256 bad = self.check_guard(g[1:])
256 bad = self.check_guard(g[1:])
257 if bad:
257 if bad:
258 raise util.Abort(bad)
258 raise util.Abort(bad)
259 drop = self.guard_re.sub('', self.full_series[idx])
259 drop = self.guard_re.sub('', self.full_series[idx])
260 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
260 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
261 self.parse_series()
261 self.parse_series()
262 self.series_dirty = True
262 self.series_dirty = True
263
263
264 def pushable(self, idx):
264 def pushable(self, idx):
265 if isinstance(idx, str):
265 if isinstance(idx, str):
266 idx = self.series.index(idx)
266 idx = self.series.index(idx)
267 patchguards = self.series_guards[idx]
267 patchguards = self.series_guards[idx]
268 if not patchguards:
268 if not patchguards:
269 return True, None
269 return True, None
270 guards = self.active()
270 guards = self.active()
271 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
271 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
272 if exactneg:
272 if exactneg:
273 return False, exactneg[0]
273 return False, exactneg[0]
274 pos = [g for g in patchguards if g[0] == '+']
274 pos = [g for g in patchguards if g[0] == '+']
275 exactpos = [g for g in pos if g[1:] in guards]
275 exactpos = [g for g in pos if g[1:] in guards]
276 if pos:
276 if pos:
277 if exactpos:
277 if exactpos:
278 return True, exactpos[0]
278 return True, exactpos[0]
279 return False, pos
279 return False, pos
280 return True, ''
280 return True, ''
281
281
282 def explain_pushable(self, idx, all_patches=False):
282 def explain_pushable(self, idx, all_patches=False):
283 write = all_patches and self.ui.write or self.ui.warn
283 write = all_patches and self.ui.write or self.ui.warn
284 if all_patches or self.ui.verbose:
284 if all_patches or self.ui.verbose:
285 if isinstance(idx, str):
285 if isinstance(idx, str):
286 idx = self.series.index(idx)
286 idx = self.series.index(idx)
287 pushable, why = self.pushable(idx)
287 pushable, why = self.pushable(idx)
288 if all_patches and pushable:
288 if all_patches and pushable:
289 if why is None:
289 if why is None:
290 write(_('allowing %s - no guards in effect\n') %
290 write(_('allowing %s - no guards in effect\n') %
291 self.series[idx])
291 self.series[idx])
292 else:
292 else:
293 if not why:
293 if not why:
294 write(_('allowing %s - no matching negative guards\n') %
294 write(_('allowing %s - no matching negative guards\n') %
295 self.series[idx])
295 self.series[idx])
296 else:
296 else:
297 write(_('allowing %s - guarded by %r\n') %
297 write(_('allowing %s - guarded by %r\n') %
298 (self.series[idx], why))
298 (self.series[idx], why))
299 if not pushable:
299 if not pushable:
300 if why:
300 if why:
301 write(_('skipping %s - guarded by %r\n') %
301 write(_('skipping %s - guarded by %r\n') %
302 (self.series[idx], why))
302 (self.series[idx], why))
303 else:
303 else:
304 write(_('skipping %s - no matching guards\n') %
304 write(_('skipping %s - no matching guards\n') %
305 self.series[idx])
305 self.series[idx])
306
306
307 def save_dirty(self):
307 def save_dirty(self):
308 def write_list(items, path):
308 def write_list(items, path):
309 fp = self.opener(path, 'w')
309 fp = self.opener(path, 'w')
310 for i in items:
310 for i in items:
311 fp.write("%s\n" % i)
311 fp.write("%s\n" % i)
312 fp.close()
312 fp.close()
313 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
313 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
314 if self.series_dirty: write_list(self.full_series, self.series_path)
314 if self.series_dirty: write_list(self.full_series, self.series_path)
315 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
315 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
316
316
317 def readheaders(self, patch):
317 def readheaders(self, patch):
318 def eatdiff(lines):
318 def eatdiff(lines):
319 while lines:
319 while lines:
320 l = lines[-1]
320 l = lines[-1]
321 if (l.startswith("diff -") or
321 if (l.startswith("diff -") or
322 l.startswith("Index:") or
322 l.startswith("Index:") or
323 l.startswith("===========")):
323 l.startswith("===========")):
324 del lines[-1]
324 del lines[-1]
325 else:
325 else:
326 break
326 break
327 def eatempty(lines):
327 def eatempty(lines):
328 while lines:
328 while lines:
329 l = lines[-1]
329 l = lines[-1]
330 if re.match('\s*$', l):
330 if re.match('\s*$', l):
331 del lines[-1]
331 del lines[-1]
332 else:
332 else:
333 break
333 break
334
334
335 pf = self.join(patch)
335 pf = self.join(patch)
336 message = []
336 message = []
337 comments = []
337 comments = []
338 user = None
338 user = None
339 date = None
339 date = None
340 format = None
340 format = None
341 subject = None
341 subject = None
342 diffstart = 0
342 diffstart = 0
343
343
344 for line in file(pf):
344 for line in file(pf):
345 line = line.rstrip()
345 line = line.rstrip()
346 if line.startswith('diff --git'):
346 if line.startswith('diff --git'):
347 diffstart = 2
347 diffstart = 2
348 break
348 break
349 if diffstart:
349 if diffstart:
350 if line.startswith('+++ '):
350 if line.startswith('+++ '):
351 diffstart = 2
351 diffstart = 2
352 break
352 break
353 if line.startswith("--- "):
353 if line.startswith("--- "):
354 diffstart = 1
354 diffstart = 1
355 continue
355 continue
356 elif format == "hgpatch":
356 elif format == "hgpatch":
357 # parse values when importing the result of an hg export
357 # parse values when importing the result of an hg export
358 if line.startswith("# User "):
358 if line.startswith("# User "):
359 user = line[7:]
359 user = line[7:]
360 elif line.startswith("# Date "):
360 elif line.startswith("# Date "):
361 date = line[7:]
361 date = line[7:]
362 elif not line.startswith("# ") and line:
362 elif not line.startswith("# ") and line:
363 message.append(line)
363 message.append(line)
364 format = None
364 format = None
365 elif line == '# HG changeset patch':
365 elif line == '# HG changeset patch':
366 format = "hgpatch"
366 format = "hgpatch"
367 elif (format != "tagdone" and (line.startswith("Subject: ") or
367 elif (format != "tagdone" and (line.startswith("Subject: ") or
368 line.startswith("subject: "))):
368 line.startswith("subject: "))):
369 subject = line[9:]
369 subject = line[9:]
370 format = "tag"
370 format = "tag"
371 elif (format != "tagdone" and (line.startswith("From: ") or
371 elif (format != "tagdone" and (line.startswith("From: ") or
372 line.startswith("from: "))):
372 line.startswith("from: "))):
373 user = line[6:]
373 user = line[6:]
374 format = "tag"
374 format = "tag"
375 elif format == "tag" and line == "":
375 elif format == "tag" and line == "":
376 # when looking for tags (subject: from: etc) they
376 # when looking for tags (subject: from: etc) they
377 # end once you find a blank line in the source
377 # end once you find a blank line in the source
378 format = "tagdone"
378 format = "tagdone"
379 elif message or line:
379 elif message or line:
380 message.append(line)
380 message.append(line)
381 comments.append(line)
381 comments.append(line)
382
382
383 eatdiff(message)
383 eatdiff(message)
384 eatdiff(comments)
384 eatdiff(comments)
385 eatempty(message)
385 eatempty(message)
386 eatempty(comments)
386 eatempty(comments)
387
387
388 # make sure message isn't empty
388 # make sure message isn't empty
389 if format and format.startswith("tag") and subject:
389 if format and format.startswith("tag") and subject:
390 message.insert(0, "")
390 message.insert(0, "")
391 message.insert(0, subject)
391 message.insert(0, subject)
392 return patchheader(message, comments, user, date, diffstart > 1)
392 return patchheader(message, comments, user, date, diffstart > 1)
393
393
394 def removeundo(self, repo):
394 def removeundo(self, repo):
395 undo = repo.sjoin('undo')
395 undo = repo.sjoin('undo')
396 if not os.path.exists(undo):
396 if not os.path.exists(undo):
397 return
397 return
398 try:
398 try:
399 os.unlink(undo)
399 os.unlink(undo)
400 except OSError, inst:
400 except OSError, inst:
401 self.ui.warn(_('error removing undo: %s\n') % str(inst))
401 self.ui.warn(_('error removing undo: %s\n') % str(inst))
402
402
403 def printdiff(self, repo, node1, node2=None, files=None,
403 def printdiff(self, repo, node1, node2=None, files=None,
404 fp=None, changes=None, opts={}):
404 fp=None, changes=None, opts={}):
405 m = cmdutil.match(repo, files, opts)
405 m = cmdutil.match(repo, files, opts)
406 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
406 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
407 write = fp is None and repo.ui.write or fp.write
407 write = fp is None and repo.ui.write or fp.write
408 for chunk in chunks:
408 for chunk in chunks:
409 write(chunk)
409 write(chunk)
410
410
411 def mergeone(self, repo, mergeq, head, patch, rev):
411 def mergeone(self, repo, mergeq, head, patch, rev):
412 # first try just applying the patch
412 # first try just applying the patch
413 (err, n) = self.apply(repo, [ patch ], update_status=False,
413 (err, n) = self.apply(repo, [ patch ], update_status=False,
414 strict=True, merge=rev)
414 strict=True, merge=rev)
415
415
416 if err == 0:
416 if err == 0:
417 return (err, n)
417 return (err, n)
418
418
419 if n is None:
419 if n is None:
420 raise util.Abort(_("apply failed for patch %s") % patch)
420 raise util.Abort(_("apply failed for patch %s") % patch)
421
421
422 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
422 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
423
423
424 # apply failed, strip away that rev and merge.
424 # apply failed, strip away that rev and merge.
425 hg.clean(repo, head)
425 hg.clean(repo, head)
426 self.strip(repo, n, update=False, backup='strip')
426 self.strip(repo, n, update=False, backup='strip')
427
427
428 ctx = repo[rev]
428 ctx = repo[rev]
429 ret = hg.merge(repo, rev)
429 ret = hg.merge(repo, rev)
430 if ret:
430 if ret:
431 raise util.Abort(_("update returned %d") % ret)
431 raise util.Abort(_("update returned %d") % ret)
432 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
432 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
433 if n == None:
433 if n is None:
434 raise util.Abort(_("repo commit failed"))
434 raise util.Abort(_("repo commit failed"))
435 try:
435 try:
436 ph = mergeq.readheaders(patch)
436 ph = mergeq.readheaders(patch)
437 except:
437 except:
438 raise util.Abort(_("unable to read %s") % patch)
438 raise util.Abort(_("unable to read %s") % patch)
439
439
440 patchf = self.opener(patch, "w")
440 patchf = self.opener(patch, "w")
441 comments = str(ph)
441 comments = str(ph)
442 if comments:
442 if comments:
443 patchf.write(comments)
443 patchf.write(comments)
444 self.printdiff(repo, head, n, fp=patchf)
444 self.printdiff(repo, head, n, fp=patchf)
445 patchf.close()
445 patchf.close()
446 self.removeundo(repo)
446 self.removeundo(repo)
447 return (0, n)
447 return (0, n)
448
448
449 def qparents(self, repo, rev=None):
449 def qparents(self, repo, rev=None):
450 if rev is None:
450 if rev is None:
451 (p1, p2) = repo.dirstate.parents()
451 (p1, p2) = repo.dirstate.parents()
452 if p2 == nullid:
452 if p2 == nullid:
453 return p1
453 return p1
454 if len(self.applied) == 0:
454 if len(self.applied) == 0:
455 return None
455 return None
456 return bin(self.applied[-1].rev)
456 return bin(self.applied[-1].rev)
457 pp = repo.changelog.parents(rev)
457 pp = repo.changelog.parents(rev)
458 if pp[1] != nullid:
458 if pp[1] != nullid:
459 arevs = [ x.rev for x in self.applied ]
459 arevs = [ x.rev for x in self.applied ]
460 p0 = hex(pp[0])
460 p0 = hex(pp[0])
461 p1 = hex(pp[1])
461 p1 = hex(pp[1])
462 if p0 in arevs:
462 if p0 in arevs:
463 return pp[0]
463 return pp[0]
464 if p1 in arevs:
464 if p1 in arevs:
465 return pp[1]
465 return pp[1]
466 return pp[0]
466 return pp[0]
467
467
468 def mergepatch(self, repo, mergeq, series):
468 def mergepatch(self, repo, mergeq, series):
469 if len(self.applied) == 0:
469 if len(self.applied) == 0:
470 # each of the patches merged in will have two parents. This
470 # each of the patches merged in will have two parents. This
471 # can confuse the qrefresh, qdiff, and strip code because it
471 # can confuse the qrefresh, qdiff, and strip code because it
472 # needs to know which parent is actually in the patch queue.
472 # needs to know which parent is actually in the patch queue.
473 # so, we insert a merge marker with only one parent. This way
473 # so, we insert a merge marker with only one parent. This way
474 # the first patch in the queue is never a merge patch
474 # the first patch in the queue is never a merge patch
475 #
475 #
476 pname = ".hg.patches.merge.marker"
476 pname = ".hg.patches.merge.marker"
477 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
477 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
478 self.removeundo(repo)
478 self.removeundo(repo)
479 self.applied.append(statusentry(hex(n), pname))
479 self.applied.append(statusentry(hex(n), pname))
480 self.applied_dirty = 1
480 self.applied_dirty = 1
481
481
482 head = self.qparents(repo)
482 head = self.qparents(repo)
483
483
484 for patch in series:
484 for patch in series:
485 patch = mergeq.lookup(patch, strict=True)
485 patch = mergeq.lookup(patch, strict=True)
486 if not patch:
486 if not patch:
487 self.ui.warn(_("patch %s does not exist\n") % patch)
487 self.ui.warn(_("patch %s does not exist\n") % patch)
488 return (1, None)
488 return (1, None)
489 pushable, reason = self.pushable(patch)
489 pushable, reason = self.pushable(patch)
490 if not pushable:
490 if not pushable:
491 self.explain_pushable(patch, all_patches=True)
491 self.explain_pushable(patch, all_patches=True)
492 continue
492 continue
493 info = mergeq.isapplied(patch)
493 info = mergeq.isapplied(patch)
494 if not info:
494 if not info:
495 self.ui.warn(_("patch %s is not applied\n") % patch)
495 self.ui.warn(_("patch %s is not applied\n") % patch)
496 return (1, None)
496 return (1, None)
497 rev = bin(info[1])
497 rev = bin(info[1])
498 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
498 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
499 if head:
499 if head:
500 self.applied.append(statusentry(hex(head), patch))
500 self.applied.append(statusentry(hex(head), patch))
501 self.applied_dirty = 1
501 self.applied_dirty = 1
502 if err:
502 if err:
503 return (err, head)
503 return (err, head)
504 self.save_dirty()
504 self.save_dirty()
505 return (0, head)
505 return (0, head)
506
506
507 def patch(self, repo, patchfile):
507 def patch(self, repo, patchfile):
508 '''Apply patchfile to the working directory.
508 '''Apply patchfile to the working directory.
509 patchfile: file name of patch'''
509 patchfile: file name of patch'''
510 files = {}
510 files = {}
511 try:
511 try:
512 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
512 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
513 files=files)
513 files=files)
514 except Exception, inst:
514 except Exception, inst:
515 self.ui.note(str(inst) + '\n')
515 self.ui.note(str(inst) + '\n')
516 if not self.ui.verbose:
516 if not self.ui.verbose:
517 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
517 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
518 return (False, files, False)
518 return (False, files, False)
519
519
520 return (True, files, fuzz)
520 return (True, files, fuzz)
521
522 def apply(self, repo, series, list=False, update_status=True,
523 strict=False, patchdir=None, merge=None, all_files={}):
524 wlock = lock = tr = None
525 try:
526 wlock = repo.wlock()
527 lock = repo.lock()
528 tr = repo.transaction()
529 try:
530 ret = self._apply(repo, series, list, update_status,
531 strict, patchdir, merge, all_files=all_files)
532 tr.close()
533 self.save_dirty()
534 return ret
535 except:
536 try:
537 tr.abort()
538 finally:
539 repo.invalidate()
540 repo.dirstate.invalidate()
541 raise
542 finally:
543 del tr
544 release(lock, wlock)
545 self.removeundo(repo)
546
547 def _apply(self, repo, series, list=False, update_status=True,
548 strict=False, patchdir=None, merge=None, all_files={}):
549 # TODO unify with commands.py
550 if not patchdir:
551 patchdir = self.path
552 err = 0
553 n = None
554 for patchname in series:
555 pushable, reason = self.pushable(patchname)
556 if not pushable:
557 self.explain_pushable(patchname, all_patches=True)
558 continue
559 self.ui.warn(_("applying %s\n") % patchname)
560 pf = os.path.join(patchdir, patchname)
561
562 try:
563 ph = self.readheaders(patchname)
564 except:
565 self.ui.warn(_("Unable to read %s\n") % patchname)
566 err = 1
567 break
568
569 message = ph.message
570 if not message:
571 message = _("imported patch %s\n") % patchname
572 else:
573 if list:
574 message.append(_("\nimported patch %s") % patchname)
575 message = '\n'.join(message)
576
577 if ph.haspatch:
578 (patcherr, files, fuzz) = self.patch(repo, pf)
579 all_files.update(files)
580 patcherr = not patcherr
581 else:
582 self.ui.warn(_("patch %s is empty\n") % patchname)
583 patcherr, files, fuzz = 0, [], 0
584
585 if merge and files:
586 # Mark as removed/merged and update dirstate parent info
587 removed = []
588 merged = []
589 for f in files:
590 if os.path.exists(repo.wjoin(f)):
591 merged.append(f)
592 else:
593 removed.append(f)
594 for f in removed:
595 repo.dirstate.remove(f)
596 for f in merged:
597 repo.dirstate.merge(f)
598 p1, p2 = repo.dirstate.parents()
599 repo.dirstate.setparents(p1, merge)
600
601 files = patch.updatedir(self.ui, repo, files)
602 match = cmdutil.matchfiles(repo, files or [])
603 n = repo.commit(files, message, ph.user, ph.date, match=match,
604 force=True)
605
606 if n == None:
606 if n is None:
607 raise util.Abort(_("repo commit failed"))
608
609 if update_status:
610 self.applied.append(statusentry(hex(n), patchname))
611
612 if patcherr:
613 self.ui.warn(_("patch failed, rejects left in working dir\n"))
614 err = 1
615 break
616
617 if fuzz and strict:
618 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
619 err = 1
620 break
621 return (err, n)
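# Each statusentry appended above pairs the new changeset id with the patch
# name; serialized (see save()/restore() below) an entry reads roughly
# "40-digit-hex:patchname", and a bare ":patchname" line denotes an
# unapplied series entry.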
622
623 def _clean_series(self, patches):
624 for i in sorted([self.find_series(p) for p in patches], reverse=True):
625 del self.full_series[i]
626 self.parse_series()
627 self.series_dirty = 1
628
629 def finish(self, repo, revs):
630 firstrev = repo[self.applied[0].rev].rev()
631 appliedbase = 0
632 patches = []
633 for rev in sorted(revs):
634 if rev < firstrev:
635 raise util.Abort(_('revision %d is not managed') % rev)
636 base = bin(self.applied[appliedbase].rev)
637 node = repo.changelog.node(rev)
638 if node != base:
639 raise util.Abort(_('cannot delete revision %d above '
640 'applied patches') % rev)
641 patches.append(self.applied[appliedbase].name)
642 appliedbase += 1
643
644 r = self.qrepo()
645 if r:
646 r.remove(patches, True)
647 else:
648 for p in patches:
649 os.unlink(self.join(p))
650
651 del self.applied[:appliedbase]
652 self.applied_dirty = 1
653 self._clean_series(patches)
654
655 def delete(self, repo, patches, opts):
656 if not patches and not opts.get('rev'):
657 raise util.Abort(_('qdelete requires at least one revision or '
658 'patch name'))
659
660 realpatches = []
661 for patch in patches:
662 patch = self.lookup(patch, strict=True)
663 info = self.isapplied(patch)
664 if info:
665 raise util.Abort(_("cannot delete applied patch %s") % patch)
666 if patch not in self.series:
667 raise util.Abort(_("patch %s not in series file") % patch)
668 realpatches.append(patch)
669
670 appliedbase = 0
671 if opts.get('rev'):
672 if not self.applied:
673 raise util.Abort(_('no patches applied'))
674 revs = cmdutil.revrange(repo, opts['rev'])
675 if len(revs) > 1 and revs[0] > revs[1]:
676 revs.reverse()
677 for rev in revs:
678 if appliedbase >= len(self.applied):
679 raise util.Abort(_("revision %d is not managed") % rev)
680
681 base = bin(self.applied[appliedbase].rev)
682 node = repo.changelog.node(rev)
683 if node != base:
684 raise util.Abort(_("cannot delete revision %d above "
685 "applied patches") % rev)
686 realpatches.append(self.applied[appliedbase].name)
687 appliedbase += 1
688
689 if not opts.get('keep'):
690 r = self.qrepo()
691 if r:
692 r.remove(realpatches, True)
693 else:
694 for p in realpatches:
695 os.unlink(self.join(p))
696
697 if appliedbase:
698 del self.applied[:appliedbase]
699 self.applied_dirty = 1
700 self._clean_series(realpatches)
701
702 def check_toppatch(self, repo):
703 if len(self.applied) > 0:
704 top = bin(self.applied[-1].rev)
705 pp = repo.dirstate.parents()
706 if top not in pp:
707 raise util.Abort(_("working directory revision is not qtip"))
708 return top
709 return None
710 def check_localchanges(self, repo, force=False, refresh=True):
711 m, a, r, d = repo.status()[:4]
712 if m or a or r or d:
713 if not force:
714 if refresh:
715 raise util.Abort(_("local changes found, refresh first"))
716 else:
717 raise util.Abort(_("local changes found"))
718 return m, a, r, d
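# repo.status()[:4] above unpacks as (modified, added, removed, deleted),
# so check_localchanges aborts whenever the working directory differs from
# its parent in any of those four ways, and returns the lists otherwise.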
719
720 _reserved = ('series', 'status', 'guards')
721 def check_reserved_name(self, name):
722 if (name in self._reserved or name.startswith('.hg')
723 or name.startswith('.mq')):
724 raise util.Abort(_('"%s" cannot be used as the name of a patch')
725 % name)
726
727 def new(self, repo, patchfn, *pats, **opts):
728 """options:
729 msg: a string or a no-argument function returning a string
730 """
731 msg = opts.get('msg')
732 force = opts.get('force')
733 user = opts.get('user')
734 date = opts.get('date')
735 if date:
736 date = util.parsedate(date)
737 self.check_reserved_name(patchfn)
738 if os.path.exists(self.join(patchfn)):
739 raise util.Abort(_('patch "%s" already exists') % patchfn)
740 if opts.get('include') or opts.get('exclude') or pats:
741 match = cmdutil.match(repo, pats, opts)
742 # detect missing files in pats
743 def badfn(f, msg):
744 raise util.Abort('%s: %s' % (f, msg))
745 match.bad = badfn
746 m, a, r, d = repo.status(match=match)[:4]
747 else:
748 m, a, r, d = self.check_localchanges(repo, force)
749 match = cmdutil.matchfiles(repo, m + a + r)
750 commitfiles = m + a + r
751 self.check_toppatch(repo)
752 insert = self.full_series_end()
753 wlock = repo.wlock()
754 try:
755 # if patch file write fails, abort early
756 p = self.opener(patchfn, "w")
757 try:
758 if date:
759 p.write("# HG changeset patch\n")
760 if user:
761 p.write("# User " + user + "\n")
762 p.write("# Date %d %d\n\n" % date)
763 elif user:
764 p.write("From: " + user + "\n\n")
765
766 if hasattr(msg, '__call__'):
767 msg = msg()
768 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
769 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
770 if n == None:
770 if n is None:
771 raise util.Abort(_("repo commit failed"))
772 try:
773 self.full_series[insert:insert] = [patchfn]
774 self.applied.append(statusentry(hex(n), patchfn))
775 self.parse_series()
776 self.series_dirty = 1
777 self.applied_dirty = 1
778 if msg:
779 msg = msg + "\n\n"
780 p.write(msg)
781 if commitfiles:
782 diffopts = self.diffopts()
783 if opts.get('git'): diffopts.git = True
784 parent = self.qparents(repo, n)
785 chunks = patch.diff(repo, node1=parent, node2=n,
786 match=match, opts=diffopts)
787 for chunk in chunks:
788 p.write(chunk)
789 p.close()
790 wlock.release()
791 wlock = None
792 r = self.qrepo()
793 if r: r.add([patchfn])
794 except:
795 repo.rollback()
796 raise
797 except Exception:
798 patchpath = self.join(patchfn)
799 try:
800 os.unlink(patchpath)
801 except:
802 self.ui.warn(_('error unlinking %s\n') % patchpath)
803 raise
804 self.removeundo(repo)
805 finally:
806 release(wlock)
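# For reference, the header written at the top of a fresh patch by the
# branches above is either
#   # HG changeset patch
#   # User <user>
#   # Date <unixtime> <tzoffset>
# when a date was supplied, or a mail-style "From: <user>" line when only
# a user was given; the message and the diff chunks follow it.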
807
808 def strip(self, repo, rev, update=True, backup="all", force=None):
809 wlock = lock = None
810 try:
811 wlock = repo.wlock()
812 lock = repo.lock()
813
814 if update:
815 self.check_localchanges(repo, force=force, refresh=False)
816 urev = self.qparents(repo, rev)
817 hg.clean(repo, urev)
818 repo.dirstate.write()
819
820 self.removeundo(repo)
821 repair.strip(self.ui, repo, rev, backup)
822 # strip may have unbundled a set of backed up revisions after
823 # the actual strip
824 self.removeundo(repo)
825 finally:
826 release(lock, wlock)
827
828 def isapplied(self, patch):
829 """returns (index, rev, patch)"""
830 for i in xrange(len(self.applied)):
831 a = self.applied[i]
832 if a.name == patch:
833 return (i, a.rev, a.name)
834 return None
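# (in the tuple above, 'rev' is the hex changeset id as stored in the
# status file; callers convert it with bin() where a binary node is
# needed, as mergepatch does earlier in this file)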
835
836 # if the exact patch name does not exist, we try a few
837 # variations. If strict is passed, we try only #1
838 #
839 # 1) a number to indicate an offset in the series file
840 # 2) a unique substring of the patch name was given
841 # 3) patchname[-+]num to indicate an offset in the series file
842 def lookup(self, patch, strict=False):
843 patch = patch and str(patch)
844
845 def partial_name(s):
846 if s in self.series:
847 return s
848 matches = [x for x in self.series if s in x]
849 if len(matches) > 1:
850 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
851 for m in matches:
852 self.ui.warn(' %s\n' % m)
853 return None
854 if matches:
855 return matches[0]
856 if len(self.series) > 0 and len(self.applied) > 0:
857 if s == 'qtip':
858 return self.series[self.series_end(True)-1]
859 if s == 'qbase':
860 return self.series[0]
861 return None
862
863 if patch == None:
863 if patch is None:
864 return None
865 if patch in self.series:
866 return patch
867
868 if not os.path.isfile(self.join(patch)):
869 try:
870 sno = int(patch)
871 except(ValueError, OverflowError):
872 pass
873 else:
874 if -len(self.series) <= sno < len(self.series):
875 return self.series[sno]
876
877 if not strict:
878 res = partial_name(patch)
879 if res:
880 return res
881 minus = patch.rfind('-')
882 if minus >= 0:
883 res = partial_name(patch[:minus])
884 if res:
885 i = self.series.index(res)
886 try:
887 off = int(patch[minus+1:] or 1)
888 except(ValueError, OverflowError):
889 pass
890 else:
891 if i - off >= 0:
892 return self.series[i - off]
893 plus = patch.rfind('+')
894 if plus >= 0:
895 res = partial_name(patch[:plus])
896 if res:
897 i = self.series.index(res)
898 try:
899 off = int(patch[plus+1:] or 1)
900 except(ValueError, OverflowError):
901 pass
902 else:
903 if i + off < len(self.series):
904 return self.series[i + off]
905 raise util.Abort(_("patch %s not in series") % patch)
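# A few illustrative resolutions for the rules above (examples, not from
# the original source): with series [a.patch, b.patch, c.patch],
# lookup('1') -> 'b.patch' (an offset into the series; negative numbers
# count from the end), lookup('b.pa') -> 'b.patch' (unique substring), and
# lookup('c.patch-2') -> 'a.patch' via the patchname[-+]num offset form.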
906
907 def push(self, repo, patch=None, force=False, list=False,
908 mergeq=None, all=False):
909 wlock = repo.wlock()
910 if repo.dirstate.parents()[0] not in repo.heads():
911 self.ui.status(_("(working directory not at a head)\n"))
912
913 if not self.series:
914 self.ui.warn(_('no patches in series\n'))
915 return 0
916
917 try:
918 patch = self.lookup(patch)
919 # Suppose our series file is: A B C and the current 'top'
920 # patch is B. qpush C should be performed (moving forward)
921 # qpush B is a NOP (no change) qpush A is an error (can't
922 # go backwards with qpush)
923 if patch:
924 info = self.isapplied(patch)
925 if info:
926 if info[0] < len(self.applied) - 1:
927 raise util.Abort(
928 _("cannot push to a previous patch: %s") % patch)
929 self.ui.warn(
930 _('qpush: %s is already at the top\n') % patch)
931 return
932 pushable, reason = self.pushable(patch)
933 if not pushable:
934 if reason:
935 reason = _('guarded by %r') % reason
936 else:
937 reason = _('no matching guards')
938 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
939 return 1
940 elif all:
941 patch = self.series[-1]
942 if self.isapplied(patch):
943 self.ui.warn(_('all patches are currently applied\n'))
944 return 0
945
946 # Following the above example, starting at 'top' of B:
947 # qpush should be performed (pushes C), but a subsequent
948 # qpush without an argument is an error (nothing to
949 # apply). This allows a loop of "...while hg qpush..." to
950 # work as it detects an error when done
951 start = self.series_end()
952 if start == len(self.series):
953 self.ui.warn(_('patch series already fully applied\n'))
954 return 1
955 if not force:
956 self.check_localchanges(repo)
957
958 self.applied_dirty = 1
959 if start > 0:
960 self.check_toppatch(repo)
961 if not patch:
962 patch = self.series[start]
963 end = start + 1
964 else:
965 end = self.series.index(patch, start) + 1
966 s = self.series[start:end]
967 all_files = {}
968 try:
969 if mergeq:
970 ret = self.mergepatch(repo, mergeq, s)
971 else:
972 ret = self.apply(repo, s, list, all_files=all_files)
973 except:
974 self.ui.warn(_('cleaning up working directory...'))
975 node = repo.dirstate.parents()[0]
976 hg.revert(repo, node, None)
977 unknown = repo.status(unknown=True)[4]
978 # only remove unknown files that we know we touched or
979 # created while patching
980 for f in unknown:
981 if f in all_files:
982 util.unlink(repo.wjoin(f))
983 self.ui.warn(_('done\n'))
984 raise
985 top = self.applied[-1].name
986 if ret[0]:
987 self.ui.write(_("errors during apply, please fix and "
988 "refresh %s\n") % top)
989 else:
990 self.ui.write(_("now at: %s\n") % top)
991 return ret[0]
992 finally:
993 wlock.release()
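# push() returns a falsy value (0 or None) on success or when there is
# nothing to do, and 1 on error -- a guarded patch, a fully applied
# series, or a failed apply -- which is what lets the "while hg qpush"
# idiom mentioned in the comments above terminate.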
994
995 def pop(self, repo, patch=None, force=False, update=True, all=False):
996 def getfile(f, rev, flags):
997 t = repo.file(f).read(rev)
998 repo.wwrite(f, t, flags)
999
1000 wlock = repo.wlock()
1001 try:
1002 if patch:
1003 # index, rev, patch
1004 info = self.isapplied(patch)
1005 if not info:
1006 patch = self.lookup(patch)
1007 info = self.isapplied(patch)
1008 if not info:
1009 raise util.Abort(_("patch %s is not applied") % patch)
1010
1011 if len(self.applied) == 0:
1012 # Allow qpop -a to work repeatedly,
1013 # but not qpop without an argument
1014 self.ui.warn(_("no patches applied\n"))
1015 return not all
1016
1017 if all:
1018 start = 0
1019 elif patch:
1020 start = info[0] + 1
1021 else:
1022 start = len(self.applied) - 1
1023
1024 if start >= len(self.applied):
1025 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1026 return
1027
1028 if not update:
1029 parents = repo.dirstate.parents()
1030 rr = [ bin(x.rev) for x in self.applied ]
1031 for p in parents:
1032 if p in rr:
1033 self.ui.warn(_("qpop: forcing dirstate update\n"))
1034 update = True
1035 else:
1036 parents = [p.hex() for p in repo[None].parents()]
1037 needupdate = False
1038 for entry in self.applied[start:]:
1039 if entry.rev in parents:
1040 needupdate = True
1041 break
1042 update = needupdate
1043
1044 if not force and update:
1045 self.check_localchanges(repo)
1046
1047 self.applied_dirty = 1
1048 end = len(self.applied)
1049 rev = bin(self.applied[start].rev)
1050 if update:
1051 top = self.check_toppatch(repo)
1052
1053 try:
1054 heads = repo.changelog.heads(rev)
1055 except error.LookupError:
1056 node = short(rev)
1057 raise util.Abort(_('trying to pop unknown node %s') % node)
1058
1059 if heads != [bin(self.applied[-1].rev)]:
1060 raise util.Abort(_("popping would remove a revision not "
1061 "managed by this patch queue"))
1062
1063 # we know there are no local changes, so we can make a simplified
1064 # form of hg.update.
1065 if update:
1066 qp = self.qparents(repo, rev)
1067 changes = repo.changelog.read(qp)
1068 mmap = repo.manifest.read(changes[0])
1069 m, a, r, d = repo.status(qp, top)[:4]
1070 if d:
1071 raise util.Abort(_("deletions found between repo revs"))
1072 for f in m:
1073 getfile(f, mmap[f], mmap.flags(f))
1074 for f in r:
1075 getfile(f, mmap[f], mmap.flags(f))
1076 for f in m + r:
1077 repo.dirstate.normal(f)
1078 for f in a:
1079 try:
1080 os.unlink(repo.wjoin(f))
1081 except OSError, e:
1082 if e.errno != errno.ENOENT:
1083 raise
1084 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1085 except: pass
1086 repo.dirstate.forget(f)
1087 repo.dirstate.setparents(qp, nullid)
1088 del self.applied[start:end]
1089 self.strip(repo, rev, update=False, backup='strip')
1090 if len(self.applied):
1091 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1092 else:
1093 self.ui.write(_("patch queue now empty\n"))
1094 finally:
1095 wlock.release()
1096
1097 def diff(self, repo, pats, opts):
1098 top = self.check_toppatch(repo)
1099 if not top:
1100 self.ui.write(_("no patches applied\n"))
1101 return
1102 qp = self.qparents(repo, top)
1103 self._diffopts = patch.diffopts(self.ui, opts)
1104 self.printdiff(repo, qp, files=pats, opts=opts)
1105
1106 def refresh(self, repo, pats=None, **opts):
1107 if len(self.applied) == 0:
1108 self.ui.write(_("no patches applied\n"))
1109 return 1
1110 msg = opts.get('msg', '').rstrip()
1111 newuser = opts.get('user')
1112 newdate = opts.get('date')
1113 if newdate:
1114 newdate = '%d %d' % util.parsedate(newdate)
1115 wlock = repo.wlock()
1116 try:
1117 self.check_toppatch(repo)
1118 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1119 top = bin(top)
1120 if repo.changelog.heads(top) != [top]:
1121 raise util.Abort(_("cannot refresh a revision with children"))
1122 cparents = repo.changelog.parents(top)
1123 patchparent = self.qparents(repo, top)
1124 ph = self.readheaders(patchfn)
1125
1126 patchf = self.opener(patchfn, 'r')
1127
1128 # if the patch was a git patch, refresh it as a git patch
1129 for line in patchf:
1130 if line.startswith('diff --git'):
1131 self.diffopts().git = True
1132 break
1133
1134 if msg:
1135 ph.setmessage(msg)
1136 if newuser:
1137 ph.setuser(newuser)
1138 if newdate:
1139 ph.setdate(newdate)
1140
1141 # only commit new patch when write is complete
1142 patchf = self.opener(patchfn, 'w', atomictemp=True)
1143
1144 patchf.seek(0)
1145 patchf.truncate()
1146
1147 comments = str(ph)
1148 if comments:
1149 patchf.write(comments)
1150
1151 if opts.get('git'):
1152 self.diffopts().git = True
1153 tip = repo.changelog.tip()
1154 if top == tip:
1155 # if the top of our patch queue is also the tip, there is an
1156 # optimization here. We update the dirstate in place and strip
1157 # off the tip commit. Then just commit the current directory
1158 # tree. We can also send repo.commit the list of files
1159 # changed to speed up the diff
1160 #
1161 # in short mode, we only diff the files included in the
1162 # patch already plus specified files
1163 #
1164 # this should really read:
1165 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1166 # but we do it backwards to take advantage of manifest/chlog
1167 # caching against the next repo.status call
1168 #
1169 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
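# (status tuples unpack as (modified, added, removed, deleted); because
# the call runs forward from patchparent to tip, mm/aa/dd/aa2 are the
# files the patch modifies, adds, and removes -- the reversed call
# sketched in the comment above would swap the added and removed columns,
# hence the swapped variable order it shows)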
1170 changes = repo.changelog.read(tip)
1171 man = repo.manifest.read(changes[0])
1172 aaa = aa[:]
1173 matchfn = cmdutil.match(repo, pats, opts)
1174 if opts.get('short'):
1175 # if amending a patch, we start with existing
1176 # files plus specified files - unfiltered
1177 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1178 # filter with inc/exl options
1179 matchfn = cmdutil.match(repo, opts=opts)
1180 else:
1181 match = cmdutil.matchall(repo)
1182 m, a, r, d = repo.status(match=match)[:4]
1183
1184 # we might end up with files that were added between
1185 # tip and the dirstate parent, but then changed in the
1186 # local dirstate. in this case, we want them to only
1187 # show up in the added section
1188 for x in m:
1189 if x not in aa:
1190 mm.append(x)
1191 # we might end up with files added by the local dirstate that
1192 # were deleted by the patch. In this case, they should only
1193 # show up in the changed section.
1194 for x in a:
1195 if x in dd:
1196 del dd[dd.index(x)]
1197 mm.append(x)
1198 else:
1199 aa.append(x)
1200 # make sure any files deleted in the local dirstate
1201 # are not in the add or change column of the patch
1202 forget = []
1203 for x in d + r:
1204 if x in aa:
1205 del aa[aa.index(x)]
1206 forget.append(x)
1207 continue
1208 elif x in mm:
1209 del mm[mm.index(x)]
1210 dd.append(x)
1211
1212 m = list(set(mm))
1213 r = list(set(dd))
1214 a = list(set(aa))
1215 c = [filter(matchfn, l) for l in (m, a, r)]
1216 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1217 chunks = patch.diff(repo, patchparent, match=match,
1218 changes=c, opts=self.diffopts())
1219 for chunk in chunks:
1220 patchf.write(chunk)
1221
1222 try:
1223 if self.diffopts().git:
1224 copies = {}
1225 for dst in a:
1226 src = repo.dirstate.copied(dst)
1227 # during qfold, the source file for copies may
1228 # be removed. Treat this as a simple add.
1229 if src is not None and src in repo.dirstate:
1230 copies.setdefault(src, []).append(dst)
1231 repo.dirstate.add(dst)
1232 # remember the copies between patchparent and tip
1233 for dst in aaa:
1234 f = repo.file(dst)
1235 src = f.renamed(man[dst])
1236 if src:
1237 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1238 if dst in a:
1239 copies[src[0]].append(dst)
1240 # we can't copy a file created by the patch itself
1241 if dst in copies:
1242 del copies[dst]
1243 for src, dsts in copies.iteritems():
1244 for dst in dsts:
1245 repo.dirstate.copy(src, dst)
1246 else:
1247 for dst in a:
1248 repo.dirstate.add(dst)
1249 # Drop useless copy information
1250 for f in list(repo.dirstate.copies()):
1251 repo.dirstate.copy(None, f)
1252 for f in r:
1253 repo.dirstate.remove(f)
1254 # if the patch excludes a modified file, mark that
1255 # file with mtime=0 so status can see it.
1256 mm = []
1257 for i in xrange(len(m)-1, -1, -1):
1258 if not matchfn(m[i]):
1259 mm.append(m[i])
1260 del m[i]
1261 for f in m:
1262 repo.dirstate.normal(f)
1263 for f in mm:
1264 repo.dirstate.normallookup(f)
1265 for f in forget:
1266 repo.dirstate.forget(f)
1267
1268 if not msg:
1269 if not ph.message:
1270 message = "[mq]: %s\n" % patchfn
1271 else:
1272 message = "\n".join(ph.message)
1273 else:
1274 message = msg
1275
1276 user = ph.user or changes[1]
1277
1278 # assumes strip can roll itself back if interrupted
1279 repo.dirstate.setparents(*cparents)
1280 self.applied.pop()
1281 self.applied_dirty = 1
1282 self.strip(repo, top, update=False,
1283 backup='strip')
1284 except:
1285 repo.dirstate.invalidate()
1286 raise
1287
1288 try:
1289 # might be nice to attempt to roll back strip after this
1290 patchf.rename()
1291 n = repo.commit(match.files(), message, user, ph.date,
1292 match=match, force=1)
1293 self.applied.append(statusentry(hex(n), patchfn))
1294 except:
1295 ctx = repo[cparents[0]]
1296 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1297 self.save_dirty()
1298 self.ui.warn(_('refresh interrupted while patch was popped! '
1299 '(revert --all, qpush to recover)\n'))
1300 raise
1301 else:
1302 self.printdiff(repo, patchparent, fp=patchf)
1303 patchf.rename()
1304 added = repo.status()[1]
1305 for a in added:
1306 f = repo.wjoin(a)
1307 try:
1308 os.unlink(f)
1309 except OSError, e:
1310 if e.errno != errno.ENOENT:
1311 raise
1312 try: os.removedirs(os.path.dirname(f))
1313 except: pass
1314 # forget the file copies in the dirstate
1315 # push should readd the files later on
1316 repo.dirstate.forget(a)
1317 self.pop(repo, force=True)
1318 self.push(repo, force=True)
1319 finally:
1320 wlock.release()
1321 self.removeundo(repo)
1322
1323 def init(self, repo, create=False):
1324 if not create and os.path.isdir(self.path):
1325 raise util.Abort(_("patch queue directory already exists"))
1326 try:
1327 os.mkdir(self.path)
1328 except OSError, inst:
1329 if inst.errno != errno.EEXIST or not create:
1330 raise
1331 if create:
1332 return self.qrepo(create=True)
1333
1334 def unapplied(self, repo, patch=None):
1335 if patch and patch not in self.series:
1336 raise util.Abort(_("patch %s is not in series file") % patch)
1337 if not patch:
1338 start = self.series_end()
1339 else:
1340 start = self.series.index(patch) + 1
1341 unapplied = []
1342 for i in xrange(start, len(self.series)):
1343 pushable, reason = self.pushable(i)
1344 if pushable:
1345 unapplied.append((i, self.series[i]))
1346 self.explain_pushable(i)
1347 return unapplied
1348
1349 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1350 summary=False):
1351 def displayname(patchname):
1352 if summary:
1353 ph = self.readheaders(patchname)
1354 msg = ph.message
1355 msg = msg and ': ' + msg[0] or ': '
1356 else:
1357 msg = ''
1358 return '%s%s' % (patchname, msg)
1359
1360 applied = set([p.name for p in self.applied])
1361 if length is None:
1362 length = len(self.series) - start
1363 if not missing:
1364 for i in xrange(start, start+length):
1365 patch = self.series[i]
1366 if patch in applied:
1367 stat = 'A'
1368 elif self.pushable(i)[0]:
1369 stat = 'U'
1370 else:
1371 stat = 'G'
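# (the one-letter status codes used here: 'A' = applied, 'U' = unapplied
# but pushable, 'G' = unapplied and held back by a guard; the 'D' prefix
# in the missing-files branch below marks a file present in the patch
# directory but absent from the series)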
1372 pfx = ''
1373 if self.ui.verbose:
1374 pfx = '%d %s ' % (i, stat)
1375 elif status and status != stat:
1376 continue
1377 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1378 else:
1379 msng_list = []
1380 for root, dirs, files in os.walk(self.path):
1381 d = root[len(self.path) + 1:]
1382 for f in files:
1383 fl = os.path.join(d, f)
1384 if (fl not in self.series and
1385 fl not in (self.status_path, self.series_path,
1386 self.guards_path)
1387 and not fl.startswith('.')):
1388 msng_list.append(fl)
1389 for x in sorted(msng_list):
1390 pfx = self.ui.verbose and ('D ') or ''
1391 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1392
1393 def issaveline(self, l):
1394 if l.name == '.hg.patches.save.line':
1395 return True
1396
1397 def qrepo(self, create=False):
1398 if create or os.path.isdir(self.join(".hg")):
1399 return hg.repository(self.ui, path=self.path, create=create)
1400
1401 def restore(self, repo, rev, delete=None, qupdate=None):
1402 c = repo.changelog.read(rev)
1403 desc = c[4].strip()
1404 lines = desc.splitlines()
1405 i = 0
1406 datastart = None
1407 series = []
1408 applied = []
1409 qpp = None
1410 for i in xrange(0, len(lines)):
1411 if lines[i] == 'Patch Data:':
1412 datastart = i + 1
1413 elif lines[i].startswith('Dirstate:'):
1414 l = lines[i].rstrip()
1415 l = l[10:].split(' ')
1416 qpp = [ bin(x) for x in l ]
1417 elif datastart != None:
1418 l = lines[i].rstrip()
1419 se = statusentry(l)
1420 file_ = se.name
1421 if se.rev:
1422 applied.append(se)
1423 else:
1424 series.append(file_)
1425 if datastart == None:
1425 if datastart is None:
1426 self.ui.warn(_("No saved patch data found\n"))
1427 return 1
1428 self.ui.warn(_("restoring status: %s\n") % lines[0])
1429 self.full_series = series
1430 self.applied = applied
1431 self.parse_series()
1432 self.series_dirty = 1
1433 self.applied_dirty = 1
1434 heads = repo.changelog.heads()
1435 if delete:
1436 if rev not in heads:
1437 self.ui.warn(_("save entry has children, leaving it alone\n"))
1438 else:
1439 self.ui.warn(_("removing save entry %s\n") % short(rev))
1440 pp = repo.dirstate.parents()
1441 if rev in pp:
1442 update = True
1443 else:
1444 update = False
1445 self.strip(repo, rev, update=update, backup='strip')
1446 if qpp:
1447 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1448 (short(qpp[0]), short(qpp[1])))
1449 if qupdate:
1450 self.ui.status(_("queue directory updating\n"))
1451 r = self.qrepo()
1452 if not r:
1453 self.ui.warn(_("Unable to load queue repository\n"))
1454 return 1
1455 hg.clean(r, qpp[0])
1456
1457 def save(self, repo, msg=None):
1458 if len(self.applied) == 0:
1459 self.ui.warn(_("save: no patches applied, exiting\n"))
1460 return 1
1461 if self.issaveline(self.applied[-1]):
1462 self.ui.warn(_("status is already saved\n"))
1463 return 1
1464
1465 ar = [ ':' + x for x in self.full_series ]
1466 if not msg:
1467 msg = _("hg patches saved state")
1468 else:
1469 msg = "hg patches: " + msg.rstrip('\r\n')
1470 r = self.qrepo()
1471 if r:
1472 pp = r.dirstate.parents()
1473 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1474 msg += "\n\nPatch Data:\n"
1475 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1476 "\n".join(ar) + '\n' or "")
1477 n = repo.commit(None, text, user=None, force=1)
1478 if not n:
1479 self.ui.warn(_("repo commit failed\n"))
1480 return 1
1481 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1482 self.applied_dirty = 1
1483 self.removeundo(repo)
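# The save-state commit message assembled above looks roughly like:
#
#   hg patches saved state
#   Dirstate: <p1 hex> <p2 hex>
#
#   Patch Data:
#   <hex>:<applied patch name>
#   :<unapplied series entry>
#
# which is the layout restore() above parses back out.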
1484
1485 def full_series_end(self):
1486 if len(self.applied) > 0:
1487 p = self.applied[-1].name
1488 end = self.find_series(p)
1489 if end == None:
1489 if end is None:
1490 return len(self.full_series)
1490 return len(self.full_series)
1491 return end + 1
1491 return end + 1
1492 return 0
1492 return 0
1493
1493
1494 def series_end(self, all_patches=False):
1494 def series_end(self, all_patches=False):
1495 """If all_patches is False, return the index of the next pushable patch
1495 """If all_patches is False, return the index of the next pushable patch
1496 in the series, or the series length. If all_patches is True, return the
1496 in the series, or the series length. If all_patches is True, return the
1497 index of the first patch past the last applied one.
1497 index of the first patch past the last applied one.
1498 """
1498 """
1499 end = 0
1499 end = 0
1500 def next(start):
1500 def next(start):
1501 if all_patches:
1501 if all_patches:
1502 return start
1502 return start
1503 i = start
1503 i = start
1504 while i < len(self.series):
1504 while i < len(self.series):
1505 p, reason = self.pushable(i)
1505 p, reason = self.pushable(i)
1506 if p:
1506 if p:
1507 break
1507 break
1508 self.explain_pushable(i)
1508 self.explain_pushable(i)
1509 i += 1
1509 i += 1
1510 return i
1510 return i
1511 if len(self.applied) > 0:
1511 if len(self.applied) > 0:
1512 p = self.applied[-1].name
1512 p = self.applied[-1].name
1513 try:
1513 try:
1514 end = self.series.index(p)
1514 end = self.series.index(p)
1515 except ValueError:
1515 except ValueError:
1516 return 0
1516 return 0
1517 return next(end + 1)
1517 return next(end + 1)
1518 return next(end)
1518 return next(end)
1519
1519
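series_end() above reduces to a forward scan starting just past the last applied patch, skipping unpushable (guarded) entries unless all_patches is set. A hedged standalone sketch of that scan; next_pushable and the toy pushable predicate are illustrative names, not mq API:

def next_pushable(series, start, pushable):
    # mirror of the inner next(): advance past unpushable entries
    i = start
    while i < len(series):
        if pushable(series[i]):
            break
        i += 1
    return i

series = ['fix-a.patch', 'guarded-b.patch', 'fix-c.patch']
pushable = lambda name: not name.startswith('guarded')
print(next_pushable(series, 1, pushable))   # -> 2, skipping the guarded entry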
1520 def appliedname(self, index):
1520 def appliedname(self, index):
1521 pname = self.applied[index].name
1521 pname = self.applied[index].name
1522 if not self.ui.verbose:
1522 if not self.ui.verbose:
1523 p = pname
1523 p = pname
1524 else:
1524 else:
1525 p = str(self.series.index(pname)) + " " + pname
1525 p = str(self.series.index(pname)) + " " + pname
1526 return p
1526 return p
1527
1527
1528 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1528 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1529 force=None, git=False):
1529 force=None, git=False):
1530 def checkseries(patchname):
1530 def checkseries(patchname):
1531 if patchname in self.series:
1531 if patchname in self.series:
1532 raise util.Abort(_('patch %s is already in the series file')
1532 raise util.Abort(_('patch %s is already in the series file')
1533 % patchname)
1533 % patchname)
1534 def checkfile(patchname):
1534 def checkfile(patchname):
1535 if not force and os.path.exists(self.join(patchname)):
1535 if not force and os.path.exists(self.join(patchname)):
1536 raise util.Abort(_('patch "%s" already exists')
1536 raise util.Abort(_('patch "%s" already exists')
1537 % patchname)
1537 % patchname)
1538
1538
1539 if rev:
1539 if rev:
1540 if files:
1540 if files:
1541 raise util.Abort(_('option "-r" not valid when importing '
1541 raise util.Abort(_('option "-r" not valid when importing '
1542 'files'))
1542 'files'))
1543 rev = cmdutil.revrange(repo, rev)
1543 rev = cmdutil.revrange(repo, rev)
1544 rev.sort(lambda x, y: cmp(y, x))
1544 rev.sort(lambda x, y: cmp(y, x))
1545 if (len(files) > 1 or len(rev) > 1) and patchname:
1545 if (len(files) > 1 or len(rev) > 1) and patchname:
1546 raise util.Abort(_('option "-n" not valid when importing multiple '
1546 raise util.Abort(_('option "-n" not valid when importing multiple '
1547 'patches'))
1547 'patches'))
1548 i = 0
1548 i = 0
1549 added = []
1549 added = []
1550 if rev:
1550 if rev:
1551 # If mq patches are applied, we can only import revisions
1551 # If mq patches are applied, we can only import revisions
1552 # that form a linear path to qbase.
1552 # that form a linear path to qbase.
1553 # Otherwise, they should form a linear path to a head.
1553 # Otherwise, they should form a linear path to a head.
1554 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1554 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1555 if len(heads) > 1:
1555 if len(heads) > 1:
1556 raise util.Abort(_('revision %d is the root of more than one '
1556 raise util.Abort(_('revision %d is the root of more than one '
1557 'branch') % rev[-1])
1557 'branch') % rev[-1])
1558 if self.applied:
1558 if self.applied:
1559 base = hex(repo.changelog.node(rev[0]))
1559 base = hex(repo.changelog.node(rev[0]))
1560 if base in [n.rev for n in self.applied]:
1560 if base in [n.rev for n in self.applied]:
1561 raise util.Abort(_('revision %d is already managed')
1561 raise util.Abort(_('revision %d is already managed')
1562 % rev[0])
1562 % rev[0])
1563 if heads != [bin(self.applied[-1].rev)]:
1563 if heads != [bin(self.applied[-1].rev)]:
1564 raise util.Abort(_('revision %d is not the parent of '
1564 raise util.Abort(_('revision %d is not the parent of '
1565 'the queue') % rev[0])
1565 'the queue') % rev[0])
1566 base = repo.changelog.rev(bin(self.applied[0].rev))
1566 base = repo.changelog.rev(bin(self.applied[0].rev))
1567 lastparent = repo.changelog.parentrevs(base)[0]
1567 lastparent = repo.changelog.parentrevs(base)[0]
1568 else:
1568 else:
1569 if heads != [repo.changelog.node(rev[0])]:
1569 if heads != [repo.changelog.node(rev[0])]:
1570 raise util.Abort(_('revision %d has unmanaged children')
1570 raise util.Abort(_('revision %d has unmanaged children')
1571 % rev[0])
1571 % rev[0])
1572 lastparent = None
1572 lastparent = None
1573
1573
1574 if git:
1574 if git:
1575 self.diffopts().git = True
1575 self.diffopts().git = True
1576
1576
1577 for r in rev:
1577 for r in rev:
1578 p1, p2 = repo.changelog.parentrevs(r)
1578 p1, p2 = repo.changelog.parentrevs(r)
1579 n = repo.changelog.node(r)
1579 n = repo.changelog.node(r)
1580 if p2 != nullrev:
1580 if p2 != nullrev:
1581 raise util.Abort(_('cannot import merge revision %d') % r)
1581 raise util.Abort(_('cannot import merge revision %d') % r)
1582 if lastparent and lastparent != r:
1582 if lastparent and lastparent != r:
1583 raise util.Abort(_('revision %d is not the parent of %d')
1583 raise util.Abort(_('revision %d is not the parent of %d')
1584 % (r, lastparent))
1584 % (r, lastparent))
1585 lastparent = p1
1585 lastparent = p1
1586
1586
1587 if not patchname:
1587 if not patchname:
1588 patchname = normname('%d.diff' % r)
1588 patchname = normname('%d.diff' % r)
1589 self.check_reserved_name(patchname)
1589 self.check_reserved_name(patchname)
1590 checkseries(patchname)
1590 checkseries(patchname)
1591 checkfile(patchname)
1591 checkfile(patchname)
1592 self.full_series.insert(0, patchname)
1592 self.full_series.insert(0, patchname)
1593
1593
1594 patchf = self.opener(patchname, "w")
1594 patchf = self.opener(patchname, "w")
1595 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1595 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1596 patchf.close()
1596 patchf.close()
1597
1597
1598 se = statusentry(hex(n), patchname)
1598 se = statusentry(hex(n), patchname)
1599 self.applied.insert(0, se)
1599 self.applied.insert(0, se)
1600
1600
1601 added.append(patchname)
1601 added.append(patchname)
1602 patchname = None
1602 patchname = None
1603 self.parse_series()
1603 self.parse_series()
1604 self.applied_dirty = 1
1604 self.applied_dirty = 1
1605
1605
1606 for filename in files:
1606 for filename in files:
1607 if existing:
1607 if existing:
1608 if filename == '-':
1608 if filename == '-':
1609 raise util.Abort(_('-e is incompatible with import from -'))
1609 raise util.Abort(_('-e is incompatible with import from -'))
1610 if not patchname:
1610 if not patchname:
1611 patchname = normname(filename)
1611 patchname = normname(filename)
1612 self.check_reserved_name(patchname)
1612 self.check_reserved_name(patchname)
1613 if not os.path.isfile(self.join(patchname)):
1613 if not os.path.isfile(self.join(patchname)):
1614 raise util.Abort(_("patch %s does not exist") % patchname)
1614 raise util.Abort(_("patch %s does not exist") % patchname)
1615 else:
1615 else:
1616 try:
1616 try:
1617 if filename == '-':
1617 if filename == '-':
1618 if not patchname:
1618 if not patchname:
1619 raise util.Abort(_('need --name to import a patch from -'))
1619 raise util.Abort(_('need --name to import a patch from -'))
1620 text = sys.stdin.read()
1620 text = sys.stdin.read()
1621 else:
1621 else:
1622 text = url.open(self.ui, filename).read()
1622 text = url.open(self.ui, filename).read()
1623 except (OSError, IOError):
1623 except (OSError, IOError):
1624 raise util.Abort(_("unable to read %s") % filename)
1624 raise util.Abort(_("unable to read %s") % filename)
1625 if not patchname:
1625 if not patchname:
1626 patchname = normname(os.path.basename(filename))
1626 patchname = normname(os.path.basename(filename))
1627 self.check_reserved_name(patchname)
1627 self.check_reserved_name(patchname)
1628 checkfile(patchname)
1628 checkfile(patchname)
1629 patchf = self.opener(patchname, "w")
1629 patchf = self.opener(patchname, "w")
1630 patchf.write(text)
1630 patchf.write(text)
1631 if not force:
1631 if not force:
1632 checkseries(patchname)
1632 checkseries(patchname)
1633 if patchname not in self.series:
1633 if patchname not in self.series:
1634 index = self.full_series_end() + i
1634 index = self.full_series_end() + i
1635 self.full_series[index:index] = [patchname]
1635 self.full_series[index:index] = [patchname]
1636 self.parse_series()
1636 self.parse_series()
1637 self.ui.warn(_("adding %s to series file\n") % patchname)
1637 self.ui.warn(_("adding %s to series file\n") % patchname)
1638 i += 1
1638 i += 1
1639 added.append(patchname)
1639 added.append(patchname)
1640 patchname = None
1640 patchname = None
1641 self.series_dirty = 1
1641 self.series_dirty = 1
1642 qrepo = self.qrepo()
1642 qrepo = self.qrepo()
1643 if qrepo:
1643 if qrepo:
1644 qrepo.add(added)
1644 qrepo.add(added)
1645
1645
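When importing with --rev, the loop above enforces that the revisions form a linear, merge-free chain: each one must be a non-merge whose newer neighbor lists it as first parent. A hedged sketch of just that check; check_linear and the parents dict are illustrative, nullrev mimics mercurial.node.nullrev, and the comparison is written with 'is not None' in the spirit of this changeset:

nullrev = -1

def check_linear(revs, parents):
    # revs are sorted newest-first, as qimport sorts them
    lastparent = None
    for r in revs:
        p1, p2 = parents[r]
        if p2 != nullrev:
            raise ValueError('cannot import merge revision %d' % r)
        if lastparent is not None and lastparent != r:
            raise ValueError('revision %d is not the parent of %d'
                             % (r, lastparent))
        lastparent = p1

check_linear([5, 4], {5: (4, nullrev), 4: (3, nullrev)})   # linear: passes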
1646 def delete(ui, repo, *patches, **opts):
1646 def delete(ui, repo, *patches, **opts):
1647 """remove patches from queue
1647 """remove patches from queue
1648
1648
1649 The patches must not be applied, unless they are arguments to the
1649 The patches must not be applied, unless they are arguments to the
1650 -r/--rev parameter. At least one patch or revision is required.
1650 -r/--rev parameter. At least one patch or revision is required.
1651
1651
1652 With --rev, mq will stop managing the named revisions (converting
1652 With --rev, mq will stop managing the named revisions (converting
1653 them to regular mercurial changesets). The qfinish command should
1653 them to regular mercurial changesets). The qfinish command should
1654 be used as an alternative for qdelete -r, as the latter option is
1654 be used as an alternative for qdelete -r, as the latter option is
1655 deprecated.
1655 deprecated.
1656
1656
1657 With -k/--keep, the patch files are preserved in the patch
1657 With -k/--keep, the patch files are preserved in the patch
1658 directory."""
1658 directory."""
1659 q = repo.mq
1659 q = repo.mq
1660 q.delete(repo, patches, opts)
1660 q.delete(repo, patches, opts)
1661 q.save_dirty()
1661 q.save_dirty()
1662 return 0
1662 return 0
1663
1663
1664 def applied(ui, repo, patch=None, **opts):
1664 def applied(ui, repo, patch=None, **opts):
1665 """print the patches already applied"""
1665 """print the patches already applied"""
1666 q = repo.mq
1666 q = repo.mq
1667 if patch:
1667 if patch:
1668 if patch not in q.series:
1668 if patch not in q.series:
1669 raise util.Abort(_("patch %s is not in series file") % patch)
1669 raise util.Abort(_("patch %s is not in series file") % patch)
1670 end = q.series.index(patch) + 1
1670 end = q.series.index(patch) + 1
1671 else:
1671 else:
1672 end = q.series_end(True)
1672 end = q.series_end(True)
1673 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1673 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1674
1674
1675 def unapplied(ui, repo, patch=None, **opts):
1675 def unapplied(ui, repo, patch=None, **opts):
1676 """print the patches not yet applied"""
1676 """print the patches not yet applied"""
1677 q = repo.mq
1677 q = repo.mq
1678 if patch:
1678 if patch:
1679 if patch not in q.series:
1679 if patch not in q.series:
1680 raise util.Abort(_("patch %s is not in series file") % patch)
1680 raise util.Abort(_("patch %s is not in series file") % patch)
1681 start = q.series.index(patch) + 1
1681 start = q.series.index(patch) + 1
1682 else:
1682 else:
1683 start = q.series_end(True)
1683 start = q.series_end(True)
1684 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1684 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1685
1685
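qapplied and qunapplied are two views of the same series list, split at the index computed by series_end(True). A trivial hedged illustration (the series and split point are made up):

series = ['one.patch', 'two.patch', 'three.patch']
end = 2                              # pretend the first two patches are applied
print('applied:  ', series[:end])    # qapplied's slice
print('unapplied:', series[end:])    # qunapplied's slice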
1686 def qimport(ui, repo, *filename, **opts):
1686 def qimport(ui, repo, *filename, **opts):
1687 """import a patch
1687 """import a patch
1688
1688
1689 The patch is inserted into the series after the last applied
1689 The patch is inserted into the series after the last applied
1690 patch. If no patches have been applied, qimport prepends the patch
1690 patch. If no patches have been applied, qimport prepends the patch
1691 to the series.
1691 to the series.
1692
1692
1693 The patch will have the same name as its source file unless you
1693 The patch will have the same name as its source file unless you
1694 give it a new one with -n/--name.
1694 give it a new one with -n/--name.
1695
1695
1696 You can register an existing patch inside the patch directory with
1696 You can register an existing patch inside the patch directory with
1697 the -e/--existing flag.
1697 the -e/--existing flag.
1698
1698
1699 With -f/--force, an existing patch of the same name will be
1699 With -f/--force, an existing patch of the same name will be
1700 overwritten.
1700 overwritten.
1701
1701
1702 An existing changeset may be placed under mq control with -r/--rev
1702 An existing changeset may be placed under mq control with -r/--rev
1703 (e.g. qimport --rev tip -n patch will place tip under mq control).
1703 (e.g. qimport --rev tip -n patch will place tip under mq control).
1704 With -g/--git, patches imported with --rev will use the git diff
1704 With -g/--git, patches imported with --rev will use the git diff
1705 format. See the diffs help topic for information on why this is
1705 format. See the diffs help topic for information on why this is
1706 important for preserving rename/copy information and permission
1706 important for preserving rename/copy information and permission
1707 changes.
1707 changes.
1708
1708
1709 To import a patch from standard input, pass - as the patch file.
1709 To import a patch from standard input, pass - as the patch file.
1710 When importing from standard input, a patch name must be specified
1710 When importing from standard input, a patch name must be specified
1711 using the --name flag.
1711 using the --name flag.
1712 """
1712 """
1713 q = repo.mq
1713 q = repo.mq
1714 q.qimport(repo, filename, patchname=opts['name'],
1714 q.qimport(repo, filename, patchname=opts['name'],
1715 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1715 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1716 git=opts['git'])
1716 git=opts['git'])
1717 q.save_dirty()
1717 q.save_dirty()
1718
1718
1719 if opts.get('push') and not opts.get('rev'):
1719 if opts.get('push') and not opts.get('rev'):
1720 return q.push(repo, None)
1720 return q.push(repo, None)
1721 return 0
1721 return 0
1722
1722
1723 def init(ui, repo, **opts):
1723 def init(ui, repo, **opts):
1724 """init a new queue repository
1724 """init a new queue repository
1725
1725
1726 The queue repository is unversioned by default. If
1726 The queue repository is unversioned by default. If
1727 -c/--create-repo is specified, qinit will create a separate nested
1727 -c/--create-repo is specified, qinit will create a separate nested
1728 repository for patches (qinit -c may also be run later to convert
1728 repository for patches (qinit -c may also be run later to convert
1729 an unversioned patch repository into a versioned one). You can use
1729 an unversioned patch repository into a versioned one). You can use
1730 qcommit to commit changes to this queue repository."""
1730 qcommit to commit changes to this queue repository."""
1731 q = repo.mq
1731 q = repo.mq
1732 r = q.init(repo, create=opts['create_repo'])
1732 r = q.init(repo, create=opts['create_repo'])
1733 q.save_dirty()
1733 q.save_dirty()
1734 if r:
1734 if r:
1735 if not os.path.exists(r.wjoin('.hgignore')):
1735 if not os.path.exists(r.wjoin('.hgignore')):
1736 fp = r.wopener('.hgignore', 'w')
1736 fp = r.wopener('.hgignore', 'w')
1737 fp.write('^\\.hg\n')
1737 fp.write('^\\.hg\n')
1738 fp.write('^\\.mq\n')
1738 fp.write('^\\.mq\n')
1739 fp.write('syntax: glob\n')
1739 fp.write('syntax: glob\n')
1740 fp.write('status\n')
1740 fp.write('status\n')
1741 fp.write('guards\n')
1741 fp.write('guards\n')
1742 fp.close()
1742 fp.close()
1743 if not os.path.exists(r.wjoin('series')):
1743 if not os.path.exists(r.wjoin('series')):
1744 r.wopener('series', 'w').close()
1744 r.wopener('series', 'w').close()
1745 r.add(['.hgignore', 'series'])
1745 r.add(['.hgignore', 'series'])
1746 commands.add(ui, r)
1746 commands.add(ui, r)
1747 return 0
1747 return 0
1748
1748
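For reference, the writes above leave the freshly created patch repository with an .hgignore like this (the first two lines are regexps, the rest glob patterns after the syntax switch):

^\.hg
^\.mq
syntax: glob
status
guards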
1749 def clone(ui, source, dest=None, **opts):
1749 def clone(ui, source, dest=None, **opts):
1750 '''clone main and patch repository at same time
1750 '''clone main and patch repository at same time
1751
1751
1752 If the source is local, the destination will have no patches applied. If
1752 If the source is local, the destination will have no patches applied. If
1753 the source is remote, this command cannot check whether patches are
1753 the source is remote, this command cannot check whether patches are
1754 applied in the source, so it cannot guarantee that patches are not
1754 applied in the source, so it cannot guarantee that patches are not
1755 applied in the destination. If you clone a remote repository, make sure
1755 applied in the destination. If you clone a remote repository, make sure
1756 beforehand that it has no patches applied.
1756 beforehand that it has no patches applied.
1757
1757
1758 The source patch repository is looked for in <src>/.hg/patches by
1758 The source patch repository is looked for in <src>/.hg/patches by
1759 default. Use -p <url> to change it.
1759 default. Use -p <url> to change it.
1760
1760
1761 The patch directory must be a nested mercurial repository, as
1761 The patch directory must be a nested mercurial repository, as
1762 would be created by qinit -c.
1762 would be created by qinit -c.
1763 '''
1763 '''
1764 def patchdir(repo):
1764 def patchdir(repo):
1765 url = repo.url()
1765 url = repo.url()
1766 if url.endswith('/'):
1766 if url.endswith('/'):
1767 url = url[:-1]
1767 url = url[:-1]
1768 return url + '/.hg/patches'
1768 return url + '/.hg/patches'
1769 if dest is None:
1769 if dest is None:
1770 dest = hg.defaultdest(source)
1770 dest = hg.defaultdest(source)
1771 sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
1771 sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
1772 if opts['patches']:
1772 if opts['patches']:
1773 patchespath = ui.expandpath(opts['patches'])
1773 patchespath = ui.expandpath(opts['patches'])
1774 else:
1774 else:
1775 patchespath = patchdir(sr)
1775 patchespath = patchdir(sr)
1776 try:
1776 try:
1777 hg.repository(ui, patchespath)
1777 hg.repository(ui, patchespath)
1778 except error.RepoError:
1778 except error.RepoError:
1779 raise util.Abort(_('versioned patch repository not found'
1779 raise util.Abort(_('versioned patch repository not found'
1780 ' (see qinit -c)'))
1780 ' (see qinit -c)'))
1781 qbase, destrev = None, None
1781 qbase, destrev = None, None
1782 if sr.local():
1782 if sr.local():
1783 if sr.mq.applied:
1783 if sr.mq.applied:
1784 qbase = bin(sr.mq.applied[0].rev)
1784 qbase = bin(sr.mq.applied[0].rev)
1785 if not hg.islocal(dest):
1785 if not hg.islocal(dest):
1786 heads = set(sr.heads())
1786 heads = set(sr.heads())
1787 destrev = list(heads.difference(sr.heads(qbase)))
1787 destrev = list(heads.difference(sr.heads(qbase)))
1788 destrev.append(sr.changelog.parents(qbase)[0])
1788 destrev.append(sr.changelog.parents(qbase)[0])
1789 elif sr.capable('lookup'):
1789 elif sr.capable('lookup'):
1790 try:
1790 try:
1791 qbase = sr.lookup('qbase')
1791 qbase = sr.lookup('qbase')
1792 except error.RepoError:
1792 except error.RepoError:
1793 pass
1793 pass
1794 ui.note(_('cloning main repository\n'))
1794 ui.note(_('cloning main repository\n'))
1795 sr, dr = hg.clone(ui, sr.url(), dest,
1795 sr, dr = hg.clone(ui, sr.url(), dest,
1796 pull=opts['pull'],
1796 pull=opts['pull'],
1797 rev=destrev,
1797 rev=destrev,
1798 update=False,
1798 update=False,
1799 stream=opts['uncompressed'])
1799 stream=opts['uncompressed'])
1800 ui.note(_('cloning patch repository\n'))
1800 ui.note(_('cloning patch repository\n'))
1801 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1801 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1802 pull=opts['pull'], update=not opts['noupdate'],
1802 pull=opts['pull'], update=not opts['noupdate'],
1803 stream=opts['uncompressed'])
1803 stream=opts['uncompressed'])
1804 if dr.local():
1804 if dr.local():
1805 if qbase:
1805 if qbase:
1806 ui.note(_('stripping applied patches from destination '
1806 ui.note(_('stripping applied patches from destination '
1807 'repository\n'))
1807 'repository\n'))
1808 dr.mq.strip(dr, qbase, update=False, backup=None)
1808 dr.mq.strip(dr, qbase, update=False, backup=None)
1809 if not opts['noupdate']:
1809 if not opts['noupdate']:
1810 ui.note(_('updating destination repository\n'))
1810 ui.note(_('updating destination repository\n'))
1811 hg.update(dr, dr.changelog.tip())
1811 hg.update(dr, dr.changelog.tip())
1812
1812
1813 def commit(ui, repo, *pats, **opts):
1813 def commit(ui, repo, *pats, **opts):
1814 """commit changes in the queue repository"""
1814 """commit changes in the queue repository"""
1815 q = repo.mq
1815 q = repo.mq
1816 r = q.qrepo()
1816 r = q.qrepo()
1817 if not r: raise util.Abort(_('no queue repository'))
1817 if not r: raise util.Abort(_('no queue repository'))
1818 commands.commit(r.ui, r, *pats, **opts)
1818 commands.commit(r.ui, r, *pats, **opts)
1819
1819
1820 def series(ui, repo, **opts):
1820 def series(ui, repo, **opts):
1821 """print the entire series file"""
1821 """print the entire series file"""
1822 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1822 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1823 return 0
1823 return 0
1824
1824
1825 def top(ui, repo, **opts):
1825 def top(ui, repo, **opts):
1826 """print the name of the current patch"""
1826 """print the name of the current patch"""
1827 q = repo.mq
1827 q = repo.mq
1828 t = q.applied and q.series_end(True) or 0
1828 t = q.applied and q.series_end(True) or 0
1829 if t:
1829 if t:
1830 return q.qseries(repo, start=t-1, length=1, status='A',
1830 return q.qseries(repo, start=t-1, length=1, status='A',
1831 summary=opts.get('summary'))
1831 summary=opts.get('summary'))
1832 else:
1832 else:
1833 ui.write(_("no patches applied\n"))
1833 ui.write(_("no patches applied\n"))
1834 return 1
1834 return 1
1835
1835
1836 def next(ui, repo, **opts):
1836 def next(ui, repo, **opts):
1837 """print the name of the next patch"""
1837 """print the name of the next patch"""
1838 q = repo.mq
1838 q = repo.mq
1839 end = q.series_end()
1839 end = q.series_end()
1840 if end == len(q.series):
1840 if end == len(q.series):
1841 ui.write(_("all patches applied\n"))
1841 ui.write(_("all patches applied\n"))
1842 return 1
1842 return 1
1843 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1843 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1844
1844
1845 def prev(ui, repo, **opts):
1845 def prev(ui, repo, **opts):
1846 """print the name of the previous patch"""
1846 """print the name of the previous patch"""
1847 q = repo.mq
1847 q = repo.mq
1848 l = len(q.applied)
1848 l = len(q.applied)
1849 if l == 1:
1849 if l == 1:
1850 ui.write(_("only one patch applied\n"))
1850 ui.write(_("only one patch applied\n"))
1851 return 1
1851 return 1
1852 if not l:
1852 if not l:
1853 ui.write(_("no patches applied\n"))
1853 ui.write(_("no patches applied\n"))
1854 return 1
1854 return 1
1855 return q.qseries(repo, start=l-2, length=1, status='A',
1855 return q.qseries(repo, start=l-2, length=1, status='A',
1856 summary=opts.get('summary'))
1856 summary=opts.get('summary'))
1857
1857
1858 def setupheaderopts(ui, opts):
1858 def setupheaderopts(ui, opts):
1859 def do(opt,val):
1859 def do(opt,val):
1860 if not opts[opt] and opts['current' + opt]:
1860 if not opts[opt] and opts['current' + opt]:
1861 opts[opt] = val
1861 opts[opt] = val
1862 do('user', ui.username())
1862 do('user', ui.username())
1863 do('date', "%d %d" % util.makedate())
1863 do('date', "%d %d" % util.makedate())
1864
1864
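setupheaderopts() above only fills in user/date when -U/--currentuser or -D/--currentdate is set and no explicit -u/-d value was given. A hedged standalone sketch; fill_defaults and its literal arguments are illustrative:

def fill_defaults(opts, username, now):
    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = username
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = now
    return opts

opts = {'user': '', 'currentuser': True, 'date': '', 'currentdate': False}
print(fill_defaults(opts, 'alice', '1242000000 0'))
# user is filled in; date stays empty because -D was not given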
1865 def new(ui, repo, patch, *args, **opts):
1865 def new(ui, repo, patch, *args, **opts):
1866 """create a new patch
1866 """create a new patch
1867
1867
1868 qnew creates a new patch on top of the currently-applied patch (if
1868 qnew creates a new patch on top of the currently-applied patch (if
1869 any). It will refuse to run if there are any outstanding changes
1869 any). It will refuse to run if there are any outstanding changes
1870 unless -f/--force is specified, in which case the patch will be
1870 unless -f/--force is specified, in which case the patch will be
1871 initialized with them. You may also use -I/--include,
1871 initialized with them. You may also use -I/--include,
1872 -X/--exclude, and/or a list of files after the patch name to add
1872 -X/--exclude, and/or a list of files after the patch name to add
1873 only changes to matching files to the new patch, leaving the rest
1873 only changes to matching files to the new patch, leaving the rest
1874 as uncommitted modifications.
1874 as uncommitted modifications.
1875
1875
1876 -u/--user and -d/--date can be used to set the (given) user and
1876 -u/--user and -d/--date can be used to set the (given) user and
1877 date, respectively. -U/--currentuser and -D/--currentdate set user
1877 date, respectively. -U/--currentuser and -D/--currentdate set user
1878 to current user and date to current date.
1878 to current user and date to current date.
1879
1879
1880 -e/--edit, -m/--message or -l/--logfile set the patch header as
1880 -e/--edit, -m/--message or -l/--logfile set the patch header as
1881 well as the commit message. If none is specified, the header is
1881 well as the commit message. If none is specified, the header is
1882 empty and the commit message is '[mq]: PATCH'.
1882 empty and the commit message is '[mq]: PATCH'.
1883
1883
1884 Use the -g/--git option to keep the patch in the git extended diff
1884 Use the -g/--git option to keep the patch in the git extended diff
1885 format. Read the diffs help topic for more information on why this
1885 format. Read the diffs help topic for more information on why this
1886 is important for preserving permission changes and copy/rename
1886 is important for preserving permission changes and copy/rename
1887 information.
1887 information.
1888 """
1888 """
1889 msg = cmdutil.logmessage(opts)
1889 msg = cmdutil.logmessage(opts)
1890 def getmsg(): return ui.edit(msg, ui.username())
1890 def getmsg(): return ui.edit(msg, ui.username())
1891 q = repo.mq
1891 q = repo.mq
1892 opts['msg'] = msg
1892 opts['msg'] = msg
1893 if opts.get('edit'):
1893 if opts.get('edit'):
1894 opts['msg'] = getmsg
1894 opts['msg'] = getmsg
1895 else:
1895 else:
1896 opts['msg'] = msg
1896 opts['msg'] = msg
1897 setupheaderopts(ui, opts)
1897 setupheaderopts(ui, opts)
1898 q.new(repo, patch, *args, **opts)
1898 q.new(repo, patch, *args, **opts)
1899 q.save_dirty()
1899 q.save_dirty()
1900 return 0
1900 return 0
1901
1901
1902 def refresh(ui, repo, *pats, **opts):
1902 def refresh(ui, repo, *pats, **opts):
1903 """update the current patch
1903 """update the current patch
1904
1904
1905 If any file patterns are provided, the refreshed patch will
1905 If any file patterns are provided, the refreshed patch will
1906 contain only the modifications that match those patterns; the
1906 contain only the modifications that match those patterns; the
1907 remaining modifications will stay in the working directory.
1907 remaining modifications will stay in the working directory.
1908
1908
1909 If -s/--short is specified, files currently included in the patch
1909 If -s/--short is specified, files currently included in the patch
1910 will be refreshed just like matched files and remain in the patch.
1910 will be refreshed just like matched files and remain in the patch.
1911
1911
1912 hg add/remove/copy/rename work as usual, though you might want to
1912 hg add/remove/copy/rename work as usual, though you might want to
1913 use git-style patches (-g/--git or [diff] git=1) to track copies
1913 use git-style patches (-g/--git or [diff] git=1) to track copies
1914 and renames. See the diffs help topic for more information on the
1914 and renames. See the diffs help topic for more information on the
1915 git diff format.
1915 git diff format.
1916 """
1916 """
1917 q = repo.mq
1917 q = repo.mq
1918 message = cmdutil.logmessage(opts)
1918 message = cmdutil.logmessage(opts)
1919 if opts['edit']:
1919 if opts['edit']:
1920 if not q.applied:
1920 if not q.applied:
1921 ui.write(_("no patches applied\n"))
1921 ui.write(_("no patches applied\n"))
1922 return 1
1922 return 1
1923 if message:
1923 if message:
1924 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1924 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1925 patch = q.applied[-1].name
1925 patch = q.applied[-1].name
1926 ph = q.readheaders(patch)
1926 ph = q.readheaders(patch)
1927 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1927 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1928 setupheaderopts(ui, opts)
1928 setupheaderopts(ui, opts)
1929 ret = q.refresh(repo, pats, msg=message, **opts)
1929 ret = q.refresh(repo, pats, msg=message, **opts)
1930 q.save_dirty()
1930 q.save_dirty()
1931 return ret
1931 return ret
1932
1932
1933 def diff(ui, repo, *pats, **opts):
1933 def diff(ui, repo, *pats, **opts):
1934 """diff of the current patch and subsequent modifications
1934 """diff of the current patch and subsequent modifications
1935
1935
1936 Shows a diff which includes the current patch as well as any
1936 Shows a diff which includes the current patch as well as any
1937 changes which have been made in the working directory since the
1937 changes which have been made in the working directory since the
1938 last refresh (thus showing what the current patch would become
1938 last refresh (thus showing what the current patch would become
1939 after a qrefresh).
1939 after a qrefresh).
1940
1940
1941 Use 'hg diff' if you only want to see the changes made since the
1941 Use 'hg diff' if you only want to see the changes made since the
1942 last qrefresh, or 'hg export qtip' if you want to see changes made
1942 last qrefresh, or 'hg export qtip' if you want to see changes made
1943 by the current patch without including changes made since the
1943 by the current patch without including changes made since the
1944 qrefresh.
1944 qrefresh.
1945 """
1945 """
1946 repo.mq.diff(repo, pats, opts)
1946 repo.mq.diff(repo, pats, opts)
1947 return 0
1947 return 0
1948
1948
1949 def fold(ui, repo, *files, **opts):
1949 def fold(ui, repo, *files, **opts):
1950 """fold the named patches into the current patch
1950 """fold the named patches into the current patch
1951
1951
1952 Patches must not yet be applied. Each patch will be successively
1952 Patches must not yet be applied. Each patch will be successively
1953 applied to the current patch in the order given. If all the
1953 applied to the current patch in the order given. If all the
1954 patches apply successfully, the current patch will be refreshed
1954 patches apply successfully, the current patch will be refreshed
1955 with the new cumulative patch, and the folded patches will be
1955 with the new cumulative patch, and the folded patches will be
1956 deleted. With -k/--keep, the folded patch files will not be
1956 deleted. With -k/--keep, the folded patch files will not be
1957 removed afterwards.
1957 removed afterwards.
1958
1958
1959 The header for each folded patch will be concatenated with the
1959 The header for each folded patch will be concatenated with the
1960 current patch header, separated by a line of '* * *'."""
1960 current patch header, separated by a line of '* * *'."""
1961
1961
1962 q = repo.mq
1962 q = repo.mq
1963
1963
1964 if not files:
1964 if not files:
1965 raise util.Abort(_('qfold requires at least one patch name'))
1965 raise util.Abort(_('qfold requires at least one patch name'))
1966 if not q.check_toppatch(repo):
1966 if not q.check_toppatch(repo):
1967 raise util.Abort(_('No patches applied'))
1967 raise util.Abort(_('No patches applied'))
1968 q.check_localchanges(repo)
1968 q.check_localchanges(repo)
1969
1969
1970 message = cmdutil.logmessage(opts)
1970 message = cmdutil.logmessage(opts)
1971 if opts['edit']:
1971 if opts['edit']:
1972 if message:
1972 if message:
1973 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1973 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1974
1974
1975 parent = q.lookup('qtip')
1975 parent = q.lookup('qtip')
1976 patches = []
1976 patches = []
1977 messages = []
1977 messages = []
1978 for f in files:
1978 for f in files:
1979 p = q.lookup(f)
1979 p = q.lookup(f)
1980 if p in patches or p == parent:
1980 if p in patches or p == parent:
1981 ui.warn(_('Skipping already folded patch %s\n') % p)
1981 ui.warn(_('Skipping already folded patch %s\n') % p)
1982 if q.isapplied(p):
1982 if q.isapplied(p):
1983 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1983 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1984 patches.append(p)
1984 patches.append(p)
1985
1985
1986 for p in patches:
1986 for p in patches:
1987 if not message:
1987 if not message:
1988 ph = q.readheaders(p)
1988 ph = q.readheaders(p)
1989 if ph.message:
1989 if ph.message:
1990 messages.append(ph.message)
1990 messages.append(ph.message)
1991 pf = q.join(p)
1991 pf = q.join(p)
1992 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1992 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1993 if not patchsuccess:
1993 if not patchsuccess:
1994 raise util.Abort(_('Error folding patch %s') % p)
1994 raise util.Abort(_('Error folding patch %s') % p)
1995 patch.updatedir(ui, repo, files)
1995 patch.updatedir(ui, repo, files)
1996
1996
1997 if not message:
1997 if not message:
1998 ph = q.readheaders(parent)
1998 ph = q.readheaders(parent)
1999 message, user = ph.message, ph.user
1999 message, user = ph.message, ph.user
2000 for msg in messages:
2000 for msg in messages:
2001 message.append('* * *')
2001 message.append('* * *')
2002 message.extend(msg)
2002 message.extend(msg)
2003 message = '\n'.join(message)
2003 message = '\n'.join(message)
2004
2004
2005 if opts['edit']:
2005 if opts['edit']:
2006 message = ui.edit(message, user or ui.username())
2006 message = ui.edit(message, user or ui.username())
2007
2007
2008 q.refresh(repo, msg=message)
2008 q.refresh(repo, msg=message)
2009 q.delete(repo, patches, opts)
2009 q.delete(repo, patches, opts)
2010 q.save_dirty()
2010 q.save_dirty()
2011
2011
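The message assembly in qfold above concatenates patch headers with a '* * *' separator line, exactly as the docstring promises. A hedged sketch with made-up messages:

message = ['target patch summary']
folded = [['first folded message'], ['second folded message']]
for msg in folded:
    message.append('* * *')      # separator line between patch headers
    message.extend(msg)
print('\n'.join(message))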
2012 def goto(ui, repo, patch, **opts):
2012 def goto(ui, repo, patch, **opts):
2013 '''push or pop patches until named patch is at top of stack'''
2013 '''push or pop patches until named patch is at top of stack'''
2014 q = repo.mq
2014 q = repo.mq
2015 patch = q.lookup(patch)
2015 patch = q.lookup(patch)
2016 if q.isapplied(patch):
2016 if q.isapplied(patch):
2017 ret = q.pop(repo, patch, force=opts['force'])
2017 ret = q.pop(repo, patch, force=opts['force'])
2018 else:
2018 else:
2019 ret = q.push(repo, patch, force=opts['force'])
2019 ret = q.push(repo, patch, force=opts['force'])
2020 q.save_dirty()
2020 q.save_dirty()
2021 return ret
2021 return ret
2022
2022
2023 def guard(ui, repo, *args, **opts):
2023 def guard(ui, repo, *args, **opts):
2024 '''set or print guards for a patch
2024 '''set or print guards for a patch
2025
2025
2026 Guards control whether a patch can be pushed. A patch with no
2026 Guards control whether a patch can be pushed. A patch with no
2027 guards is always pushed. A patch with a positive guard ("+foo") is
2027 guards is always pushed. A patch with a positive guard ("+foo") is
2028 pushed only if the qselect command has activated it. A patch with
2028 pushed only if the qselect command has activated it. A patch with
2029 a negative guard ("-foo") is never pushed if the qselect command
2029 a negative guard ("-foo") is never pushed if the qselect command
2030 has activated it.
2030 has activated it.
2031
2031
2032 With no arguments, print the currently active guards.
2032 With no arguments, print the currently active guards.
2033 With arguments, set guards for the named patch.
2033 With arguments, set guards for the named patch.
2034 NOTE: Specifying negative guards now requires '--'.
2034 NOTE: Specifying negative guards now requires '--'.
2035
2035
2036 To set guards on another patch:
2036 To set guards on another patch:
2037 hg qguard -- other.patch +2.6.17 -stable
2037 hg qguard -- other.patch +2.6.17 -stable
2038 '''
2038 '''
2039 def status(idx):
2039 def status(idx):
2040 guards = q.series_guards[idx] or ['unguarded']
2040 guards = q.series_guards[idx] or ['unguarded']
2041 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2041 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2042 q = repo.mq
2042 q = repo.mq
2043 patch = None
2043 patch = None
2044 args = list(args)
2044 args = list(args)
2045 if opts['list']:
2045 if opts['list']:
2046 if args or opts['none']:
2046 if args or opts['none']:
2047 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2047 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2048 for i in xrange(len(q.series)):
2048 for i in xrange(len(q.series)):
2049 status(i)
2049 status(i)
2050 return
2050 return
2051 if not args or args[0][0:1] in '-+':
2051 if not args or args[0][0:1] in '-+':
2052 if not q.applied:
2052 if not q.applied:
2053 raise util.Abort(_('no patches applied'))
2053 raise util.Abort(_('no patches applied'))
2054 patch = q.applied[-1].name
2054 patch = q.applied[-1].name
2055 if patch is None and args[0][0:1] not in '-+':
2055 if patch is None and args[0][0:1] not in '-+':
2056 patch = args.pop(0)
2056 patch = args.pop(0)
2057 if patch is None:
2057 if patch is None:
2058 raise util.Abort(_('no patch to work with'))
2058 raise util.Abort(_('no patch to work with'))
2059 if args or opts['none']:
2059 if args or opts['none']:
2060 idx = q.find_series(patch)
2060 idx = q.find_series(patch)
2061 if idx is None:
2061 if idx is None:
2062 raise util.Abort(_('no patch named %s') % patch)
2062 raise util.Abort(_('no patch named %s') % patch)
2063 q.set_guards(idx, args)
2063 q.set_guards(idx, args)
2064 q.save_dirty()
2064 q.save_dirty()
2065 else:
2065 else:
2066 status(q.series.index(q.lookup(patch)))
2066 status(q.series.index(q.lookup(patch)))
2067
2067
2068 def header(ui, repo, patch=None):
2068 def header(ui, repo, patch=None):
2069 """print the header of the topmost or specified patch"""
2069 """print the header of the topmost or specified patch"""
2070 q = repo.mq
2070 q = repo.mq
2071
2071
2072 if patch:
2072 if patch:
2073 patch = q.lookup(patch)
2073 patch = q.lookup(patch)
2074 else:
2074 else:
2075 if not q.applied:
2075 if not q.applied:
2076 ui.write(_('no patches applied\n'))
2076 ui.write(_('no patches applied\n'))
2077 return 1
2077 return 1
2078 patch = q.lookup('qtip')
2078 patch = q.lookup('qtip')
2079 ph = repo.mq.readheaders(patch)
2079 ph = repo.mq.readheaders(patch)
2080
2080
2081 ui.write('\n'.join(ph.message) + '\n')
2081 ui.write('\n'.join(ph.message) + '\n')
2082
2082
2083 def lastsavename(path):
2083 def lastsavename(path):
2084 (directory, base) = os.path.split(path)
2084 (directory, base) = os.path.split(path)
2085 names = os.listdir(directory)
2085 names = os.listdir(directory)
2086 namere = re.compile(r"%s\.([0-9]+)" % base)
2086 namere = re.compile(r"%s\.([0-9]+)" % base)
2087 maxindex = None
2087 maxindex = None
2088 maxname = None
2088 maxname = None
2089 for f in names:
2089 for f in names:
2090 m = namere.match(f)
2090 m = namere.match(f)
2091 if m:
2091 if m:
2092 index = int(m.group(1))
2092 index = int(m.group(1))
2093 if maxindex == None or index > maxindex:
2093 if maxindex is None or index > maxindex:
2094 maxindex = index
2094 maxindex = index
2095 maxname = f
2095 maxname = f
2096 if maxname:
2096 if maxname:
2097 return (os.path.join(directory, maxname), maxindex)
2097 return (os.path.join(directory, maxname), maxindex)
2098 return (None, None)
2098 return (None, None)
2099
2099
2100 def savename(path):
2100 def savename(path):
2101 (last, index) = lastsavename(path)
2101 (last, index) = lastsavename(path)
2102 if last is None:
2102 if last is None:
2103 index = 0
2103 index = 0
2104 newpath = path + ".%d" % (index + 1)
2104 newpath = path + ".%d" % (index + 1)
2105 return newpath
2105 return newpath
2106
2106
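lastsavename()/savename() above implement a simple numbered-backup scheme: saved queues live beside the queue directory as <name>.1, <name>.2, ..., and savename picks the next free suffix. A hedged sketch of the same idea; next_savename is an illustrative name, and the escaping and end-anchoring are added defensively here, where the original relies on the base name being regex-safe:

import re

def next_savename(base, existing):
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    indexes = [int(m.group(1)) for m in map(namere.match, existing) if m]
    return "%s.%d" % (base, max(indexes or [0]) + 1)

print(next_savename('patches', ['patches.1', 'patches.3', 'series']))
# -> patches.4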
2107 def push(ui, repo, patch=None, **opts):
2107 def push(ui, repo, patch=None, **opts):
2108 """push the next patch onto the stack
2108 """push the next patch onto the stack
2109
2109
2110 When -f/--force is applied, all local changes in patched files
2110 When -f/--force is applied, all local changes in patched files
2111 will be lost.
2111 will be lost.
2112 """
2112 """
2113 q = repo.mq
2113 q = repo.mq
2114 mergeq = None
2114 mergeq = None
2115
2115
2116 if opts['merge']:
2116 if opts['merge']:
2117 if opts['name']:
2117 if opts['name']:
2118 newpath = repo.join(opts['name'])
2118 newpath = repo.join(opts['name'])
2119 else:
2119 else:
2120 newpath, i = lastsavename(q.path)
2120 newpath, i = lastsavename(q.path)
2121 if not newpath:
2121 if not newpath:
2122 ui.warn(_("no saved queues found, please use -n\n"))
2122 ui.warn(_("no saved queues found, please use -n\n"))
2123 return 1
2123 return 1
2124 mergeq = queue(ui, repo.join(""), newpath)
2124 mergeq = queue(ui, repo.join(""), newpath)
2125 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2125 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2126 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2126 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2127 mergeq=mergeq, all=opts.get('all'))
2127 mergeq=mergeq, all=opts.get('all'))
2128 return ret
2128 return ret
2129
2129
2130 def pop(ui, repo, patch=None, **opts):
2130 def pop(ui, repo, patch=None, **opts):
2131 """pop the current patch off the stack
2131 """pop the current patch off the stack
2132
2132
2133 By default, pops off the top of the patch stack. If given a patch
2133 By default, pops off the top of the patch stack. If given a patch
2134 name, keeps popping off patches until the named patch is at the
2134 name, keeps popping off patches until the named patch is at the
2135 top of the stack.
2135 top of the stack.
2136 """
2136 """
2137 localupdate = True
2137 localupdate = True
2138 if opts['name']:
2138 if opts['name']:
2139 q = queue(ui, repo.join(""), repo.join(opts['name']))
2139 q = queue(ui, repo.join(""), repo.join(opts['name']))
2140 ui.warn(_('using patch queue: %s\n') % q.path)
2140 ui.warn(_('using patch queue: %s\n') % q.path)
2141 localupdate = False
2141 localupdate = False
2142 else:
2142 else:
2143 q = repo.mq
2143 q = repo.mq
2144 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2144 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2145 all=opts['all'])
2145 all=opts['all'])
2146 q.save_dirty()
2146 q.save_dirty()
2147 return ret
2147 return ret
2148
2148
2149 def rename(ui, repo, patch, name=None, **opts):
2149 def rename(ui, repo, patch, name=None, **opts):
2150 """rename a patch
2150 """rename a patch
2151
2151
2152 With one argument, renames the current patch to PATCH1.
2152 With one argument, renames the current patch to PATCH1.
2153 With two arguments, renames PATCH1 to PATCH2."""
2153 With two arguments, renames PATCH1 to PATCH2."""
2154
2154
2155 q = repo.mq
2155 q = repo.mq
2156
2156
2157 if not name:
2157 if not name:
2158 name = patch
2158 name = patch
2159 patch = None
2159 patch = None
2160
2160
2161 if patch:
2161 if patch:
2162 patch = q.lookup(patch)
2162 patch = q.lookup(patch)
2163 else:
2163 else:
2164 if not q.applied:
2164 if not q.applied:
2165 ui.write(_('no patches applied\n'))
2165 ui.write(_('no patches applied\n'))
2166 return
2166 return
2167 patch = q.lookup('qtip')
2167 patch = q.lookup('qtip')
2168 absdest = q.join(name)
2168 absdest = q.join(name)
2169 if os.path.isdir(absdest):
2169 if os.path.isdir(absdest):
2170 name = normname(os.path.join(name, os.path.basename(patch)))
2170 name = normname(os.path.join(name, os.path.basename(patch)))
2171 absdest = q.join(name)
2171 absdest = q.join(name)
2172 if os.path.exists(absdest):
2172 if os.path.exists(absdest):
2173 raise util.Abort(_('%s already exists') % absdest)
2173 raise util.Abort(_('%s already exists') % absdest)
2174
2174
2175 if name in q.series:
2175 if name in q.series:
2176 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2176 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2177
2177
2178 if ui.verbose:
2178 if ui.verbose:
2179 ui.write('renaming %s to %s\n' % (patch, name))
2179 ui.write('renaming %s to %s\n' % (patch, name))
2180 i = q.find_series(patch)
2180 i = q.find_series(patch)
2181 guards = q.guard_re.findall(q.full_series[i])
2181 guards = q.guard_re.findall(q.full_series[i])
2182 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2182 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2183 q.parse_series()
2183 q.parse_series()
2184 q.series_dirty = 1
2184 q.series_dirty = 1
2185
2185
2186 info = q.isapplied(patch)
2186 info = q.isapplied(patch)
2187 if info:
2187 if info:
2188 q.applied[info[0]] = statusentry(info[1], name)
2188 q.applied[info[0]] = statusentry(info[1], name)
2189 q.applied_dirty = 1
2189 q.applied_dirty = 1
2190
2190
2191 util.rename(q.join(patch), absdest)
2191 util.rename(q.join(patch), absdest)
2192 r = q.qrepo()
2192 r = q.qrepo()
2193 if r:
2193 if r:
2194 wlock = r.wlock()
2194 wlock = r.wlock()
2195 try:
2195 try:
2196 if r.dirstate[patch] == 'a':
2196 if r.dirstate[patch] == 'a':
2197 r.dirstate.forget(patch)
2197 r.dirstate.forget(patch)
2198 r.dirstate.add(name)
2198 r.dirstate.add(name)
2199 else:
2199 else:
2200 if r.dirstate[name] == 'r':
2200 if r.dirstate[name] == 'r':
2201 r.undelete([name])
2201 r.undelete([name])
2202 r.copy(patch, name)
2202 r.copy(patch, name)
2203 r.remove([patch], False)
2203 r.remove([patch], False)
2204 finally:
2204 finally:
2205 wlock.release()
2205 wlock.release()
2206
2206
2207 q.save_dirty()
2207 q.save_dirty()
2208
2208
2209 def restore(ui, repo, rev, **opts):
2209 def restore(ui, repo, rev, **opts):
2210 """restore the queue state saved by a revision"""
2210 """restore the queue state saved by a revision"""
2211 rev = repo.lookup(rev)
2211 rev = repo.lookup(rev)
2212 q = repo.mq
2212 q = repo.mq
2213 q.restore(repo, rev, delete=opts['delete'],
2213 q.restore(repo, rev, delete=opts['delete'],
2214 qupdate=opts['update'])
2214 qupdate=opts['update'])
2215 q.save_dirty()
2215 q.save_dirty()
2216 return 0
2216 return 0
2217
2217
2218 def save(ui, repo, **opts):
2218 def save(ui, repo, **opts):
2219 """save current queue state"""
2219 """save current queue state"""
2220 q = repo.mq
2220 q = repo.mq
2221 message = cmdutil.logmessage(opts)
2221 message = cmdutil.logmessage(opts)
2222 ret = q.save(repo, msg=message)
2222 ret = q.save(repo, msg=message)
2223 if ret:
2223 if ret:
2224 return ret
2224 return ret
2225 q.save_dirty()
2225 q.save_dirty()
2226 if opts['copy']:
2226 if opts['copy']:
2227 path = q.path
2227 path = q.path
2228 if opts['name']:
2228 if opts['name']:
2229 newpath = os.path.join(q.basepath, opts['name'])
2229 newpath = os.path.join(q.basepath, opts['name'])
2230 if os.path.exists(newpath):
2230 if os.path.exists(newpath):
2231 if not os.path.isdir(newpath):
2231 if not os.path.isdir(newpath):
2232 raise util.Abort(_('destination %s exists and is not '
2232 raise util.Abort(_('destination %s exists and is not '
2233 'a directory') % newpath)
2233 'a directory') % newpath)
2234 if not opts['force']:
2234 if not opts['force']:
2235 raise util.Abort(_('destination %s exists, '
2235 raise util.Abort(_('destination %s exists, '
2236 'use -f to force') % newpath)
2236 'use -f to force') % newpath)
2237 else:
2237 else:
2238 newpath = savename(path)
2238 newpath = savename(path)
2239 ui.warn(_("copy %s to %s\n") % (path, newpath))
2239 ui.warn(_("copy %s to %s\n") % (path, newpath))
2240 util.copyfiles(path, newpath)
2240 util.copyfiles(path, newpath)
2241 if opts['empty']:
2241 if opts['empty']:
2242 try:
2242 try:
2243 os.unlink(q.join(q.status_path))
2243 os.unlink(q.join(q.status_path))
2244 except OSError:
2244 except OSError:
2245 pass
2245 pass
2246 return 0
2246 return 0
2247
2247
2248 def strip(ui, repo, rev, **opts):
2248 def strip(ui, repo, rev, **opts):
2249 """strip a revision and all its descendants from the repository
2249 """strip a revision and all its descendants from the repository
2250
2250
2251 If one of the working directory's parent revisions is stripped, the
2251 If one of the working directory's parent revisions is stripped, the
2252 working directory will be updated to the parent of the stripped
2252 working directory will be updated to the parent of the stripped
2253 revision.
2253 revision.
2254 """
2254 """
2255 backup = 'all'
2255 backup = 'all'
2256 if opts['backup']:
2256 if opts['backup']:
2257 backup = 'strip'
2257 backup = 'strip'
2258 elif opts['nobackup']:
2258 elif opts['nobackup']:
2259 backup = 'none'
2259 backup = 'none'
2260
2260
2261 rev = repo.lookup(rev)
2261 rev = repo.lookup(rev)
2262 p = repo.dirstate.parents()
2262 p = repo.dirstate.parents()
2263 cl = repo.changelog
2263 cl = repo.changelog
2264 update = True
2264 update = True
2265 if p[0] == nullid:
2265 if p[0] == nullid:
2266 update = False
2266 update = False
2267 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2267 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2268 update = False
2268 update = False
2269 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2269 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2270 update = False
2270 update = False
2271
2271
2272 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2272 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2273 return 0
2273 return 0
2274
2274
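The parent checks above boil down to: move the working directory only when one of its parents would itself be stripped, i.e. when the stripped revision is an ancestor of (or equal to) that parent. An equivalent hedged reformulation; needs_update is an illustrative name, and ancestor(a, b) is assumed to return the greatest common ancestor, as changelog.ancestor does:

def needs_update(parents, rev, ancestor, nullid=0):
    # update iff some non-null working-dir parent descends from rev
    return any(p != nullid and ancestor(p, rev) == rev for p in parents)

anc = lambda a, b: min(a, b)            # ancestor in a linear 0-1-2-3 chain
print(needs_update((3, 0), 2, anc))     # True: parent 3 sits above rev 2
print(needs_update((1, 0), 2, anc))     # False: parent 1 sits below rev 2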
2275 def select(ui, repo, *args, **opts):
2275 def select(ui, repo, *args, **opts):
2276 '''set or print guarded patches to push
2276 '''set or print guarded patches to push
2277
2277
2278 Use the qguard command to set or print guards on a patch, then use
2278 Use the qguard command to set or print guards on a patch, then use
2279 qselect to tell mq which guards to use. A patch will be pushed if
2279 qselect to tell mq which guards to use. A patch will be pushed if
2280 it has no guards or if any positive guard matches the currently
2280 it has no guards or if any positive guard matches the currently
2281 selected guards, but will not be pushed if any negative guard
2281 selected guards, but will not be pushed if any negative guard
2282 matches the selected guards. For example:
2282 matches the selected guards. For example:
2283
2283
2284 qguard foo.patch -stable (negative guard)
2284 qguard foo.patch -stable (negative guard)
2285 qguard bar.patch +stable (positive guard)
2285 qguard bar.patch +stable (positive guard)
2286 qselect stable
2286 qselect stable
2287
2287
2288 This activates the "stable" guard. mq will skip foo.patch (because
2288 This activates the "stable" guard. mq will skip foo.patch (because
2289 it has a negative match) but push bar.patch (because it has a
2289 it has a negative match) but push bar.patch (because it has a
2290 positive match).
2290 positive match).
2291
2291
2292 With no arguments, prints the currently active guards.
2292 With no arguments, prints the currently active guards.
2293 With one argument, sets the active guard.
2293 With one argument, sets the active guard.
2294
2294
2295 Use -n/--none to deactivate guards (no other arguments needed).
2295 Use -n/--none to deactivate guards (no other arguments needed).
2296 When no guards are active, patches with positive guards are
2296 When no guards are active, patches with positive guards are
2297 skipped and patches with negative guards are pushed.
2297 skipped and patches with negative guards are pushed.
2298
2298
2299 qselect can change the guards on applied patches. It does not pop
2299 qselect can change the guards on applied patches. It does not pop
2300 guarded patches by default. Use --pop to pop back to the last
2300 guarded patches by default. Use --pop to pop back to the last
2301 applied patch that is not guarded. Use --reapply (which implies
2301 applied patch that is not guarded. Use --reapply (which implies
2302 --pop) to push back to the current patch afterwards, but skip
2302 --pop) to push back to the current patch afterwards, but skip
2303 guarded patches.
2303 guarded patches.
2304
2304
2305 Use -s/--series to print a list of all guards in the series file
2305 Use -s/--series to print a list of all guards in the series file
2306 (no other arguments needed). Use -v for more information.'''
2306 (no other arguments needed). Use -v for more information.'''
2307
2307
2308 q = repo.mq
2308 q = repo.mq
2309 guards = q.active()
2309 guards = q.active()
2310 if args or opts['none']:
2310 if args or opts['none']:
2311 old_unapplied = q.unapplied(repo)
2311 old_unapplied = q.unapplied(repo)
2312 old_guarded = [i for i in xrange(len(q.applied)) if
2312 old_guarded = [i for i in xrange(len(q.applied)) if
2313 not q.pushable(i)[0]]
2313 not q.pushable(i)[0]]
2314 q.set_active(args)
2314 q.set_active(args)
2315 q.save_dirty()
2315 q.save_dirty()
2316 if not args:
2316 if not args:
2317 ui.status(_('guards deactivated\n'))
2317 ui.status(_('guards deactivated\n'))
2318 if not opts['pop'] and not opts['reapply']:
2318 if not opts['pop'] and not opts['reapply']:
2319 unapplied = q.unapplied(repo)
2319 unapplied = q.unapplied(repo)
2320 guarded = [i for i in xrange(len(q.applied))
2320 guarded = [i for i in xrange(len(q.applied))
2321 if not q.pushable(i)[0]]
2321 if not q.pushable(i)[0]]
2322 if len(unapplied) != len(old_unapplied):
2322 if len(unapplied) != len(old_unapplied):
2323 ui.status(_('number of unguarded, unapplied patches has '
2323 ui.status(_('number of unguarded, unapplied patches has '
2324 'changed from %d to %d\n') %
2324 'changed from %d to %d\n') %
2325 (len(old_unapplied), len(unapplied)))
2325 (len(old_unapplied), len(unapplied)))
2326 if len(guarded) != len(old_guarded):
2326 if len(guarded) != len(old_guarded):
2327 ui.status(_('number of guarded, applied patches has changed '
2327 ui.status(_('number of guarded, applied patches has changed '
2328 'from %d to %d\n') %
2328 'from %d to %d\n') %
2329 (len(old_guarded), len(guarded)))
2329 (len(old_guarded), len(guarded)))
2330 elif opts['series']:
2330 elif opts['series']:
2331 guards = {}
2331 guards = {}
2332 noguards = 0
2332 noguards = 0
2333 for gs in q.series_guards:
2333 for gs in q.series_guards:
2334 if not gs:
2334 if not gs:
2335 noguards += 1
2335 noguards += 1
2336 for g in gs:
2336 for g in gs:
2337 guards.setdefault(g, 0)
2337 guards.setdefault(g, 0)
2338 guards[g] += 1
2338 guards[g] += 1
2339 if ui.verbose:
2339 if ui.verbose:
2340 guards['NONE'] = noguards
2340 guards['NONE'] = noguards
2341 guards = guards.items()
2341 guards = guards.items()
2342 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2342 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2343 if guards:
2343 if guards:
2344 ui.note(_('guards in series file:\n'))
2344 ui.note(_('guards in series file:\n'))
2345 for guard, count in guards:
2345 for guard, count in guards:
2346 ui.note('%2d ' % count)
2346 ui.note('%2d ' % count)
2347 ui.write(guard, '\n')
2347 ui.write(guard, '\n')
2348 else:
2348 else:
2349 ui.note(_('no guards in series file\n'))
2349 ui.note(_('no guards in series file\n'))
2350 else:
2350 else:
2351 if guards:
2351 if guards:
2352 ui.note(_('active guards:\n'))
2352 ui.note(_('active guards:\n'))
2353 for g in guards:
2353 for g in guards:
2354 ui.write(g, '\n')
2354 ui.write(g, '\n')
2355 else:
2355 else:
2356 ui.write(_('no active guards\n'))
2356 ui.write(_('no active guards\n'))
2357 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2357 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2358 popped = False
2358 popped = False
2359 if opts['pop'] or opts['reapply']:
2359 if opts['pop'] or opts['reapply']:
2360 for i in xrange(len(q.applied)):
2360 for i in xrange(len(q.applied)):
2361 pushable, reason = q.pushable(i)
2361 pushable, reason = q.pushable(i)
2362 if not pushable:
2362 if not pushable:
2363 ui.status(_('popping guarded patches\n'))
2363 ui.status(_('popping guarded patches\n'))
2364 popped = True
2364 popped = True
2365 if i == 0:
2365 if i == 0:
2366 q.pop(repo, all=True)
2366 q.pop(repo, all=True)
2367 else:
2367 else:
2368 q.pop(repo, i-1)
2368 q.pop(repo, i-1)
2369 break
2369 break
2370 if popped:
2370 if popped:
2371 try:
2371 try:
2372 if reapply:
2372 if reapply:
2373 ui.status(_('reapplying unguarded patches\n'))
2373 ui.status(_('reapplying unguarded patches\n'))
2374 q.push(repo, reapply)
2374 q.push(repo, reapply)
2375 finally:
2375 finally:
2376 q.save_dirty()
2376 q.save_dirty()
2377
2377
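In the pop/reapply branch above, the loop walks the applied stack from the bottom and pops back to just before the first guarded (unpushable) patch. A minimal standalone sketch of that index computation, with hypothetical patch names and a stand-in for q.pushable():

    def first_guarded_index(applied, pushable):
        """Return the index of the first guarded (unpushable) applied
        patch, or None if every applied patch is still pushable."""
        for i, name in enumerate(applied):
            if not pushable(name):
                return i
        return None

    applied = ['fix-io.patch', 'guarded-debug.patch', 'feature-x.patch']
    i = first_guarded_index(applied, lambda name: 'guarded' not in name)
    print i  # 1 -> pop down to index 0, mirroring q.pop(repo, i - 1) above
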
2378 def finish(ui, repo, *revrange, **opts):
2378 def finish(ui, repo, *revrange, **opts):
2379 """move applied patches into repository history
2379 """move applied patches into repository history
2380
2380
2381 Finishes the specified revisions (corresponding to applied
2381 Finishes the specified revisions (corresponding to applied
2382 patches) by moving them out of mq control into regular repository
2382 patches) by moving them out of mq control into regular repository
2383 history.
2383 history.
2384
2384
2385 Accepts a revision range or the -a/--applied option. If --applied
2385 Accepts a revision range or the -a/--applied option. If --applied
2386 is specified, all applied mq revisions are removed from mq
2386 is specified, all applied mq revisions are removed from mq
2387 control. Otherwise, the given revisions must be at the base of the
2387 control. Otherwise, the given revisions must be at the base of the
2388 stack of applied patches.
2388 stack of applied patches.
2389
2389
2390 This can be especially useful if your changes have been applied to
2390 This can be especially useful if your changes have been applied to
2391 an upstream repository, or if you are about to push your changes
2391 an upstream repository, or if you are about to push your changes
2392 to upstream.
2392 to upstream.
2393 """
2393 """
2394 if not opts['applied'] and not revrange:
2394 if not opts['applied'] and not revrange:
2395 raise util.Abort(_('no revisions specified'))
2395 raise util.Abort(_('no revisions specified'))
2396 elif opts['applied']:
2396 elif opts['applied']:
2397 revrange = ('qbase:qtip',) + revrange
2397 revrange = ('qbase:qtip',) + revrange
2398
2398
2399 q = repo.mq
2399 q = repo.mq
2400 if not q.applied:
2400 if not q.applied:
2401 ui.status(_('no patches applied\n'))
2401 ui.status(_('no patches applied\n'))
2402 return 0
2402 return 0
2403
2403
2404 revs = cmdutil.revrange(repo, revrange)
2404 revs = cmdutil.revrange(repo, revrange)
2405 q.finish(repo, revs)
2405 q.finish(repo, revs)
2406 q.save_dirty()
2406 q.save_dirty()
2407 return 0
2407 return 0
2408
2408
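The option handling at the top of finish() reduces -a/--applied to an ordinary revision range by prepending 'qbase:qtip'. A small sketch of just that reduction; build_revrange is an illustrative helper, not part of mq:

    def build_revrange(revrange, applied):
        # mirrors the argument checks at the top of finish() above
        if not applied and not revrange:
            raise ValueError('no revisions specified')
        if applied:
            revrange = ('qbase:qtip',) + tuple(revrange)
        return revrange

    print build_revrange((), applied=True)   # ('qbase:qtip',)
    print build_revrange(('1000',), False)   # ('1000',)
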
2409 def reposetup(ui, repo):
2409 def reposetup(ui, repo):
2410 class mqrepo(repo.__class__):
2410 class mqrepo(repo.__class__):
2411 @util.propertycache
2411 @util.propertycache
2412 def mq(self):
2412 def mq(self):
2413 return queue(self.ui, self.join(""))
2413 return queue(self.ui, self.join(""))
2414
2414
2415 def abort_if_wdir_patched(self, errmsg, force=False):
2415 def abort_if_wdir_patched(self, errmsg, force=False):
2416 if self.mq.applied and not force:
2416 if self.mq.applied and not force:
2417 parent = hex(self.dirstate.parents()[0])
2417 parent = hex(self.dirstate.parents()[0])
2418 if parent in [s.rev for s in self.mq.applied]:
2418 if parent in [s.rev for s in self.mq.applied]:
2419 raise util.Abort(errmsg)
2419 raise util.Abort(errmsg)
2420
2420
2421 def commit(self, *args, **opts):
2421 def commit(self, *args, **opts):
2422 if len(args) >= 6:
2422 if len(args) >= 6:
2423 force = args[5]
2423 force = args[5]
2424 else:
2424 else:
2425 force = opts.get('force')
2425 force = opts.get('force')
2426 self.abort_if_wdir_patched(
2426 self.abort_if_wdir_patched(
2427 _('cannot commit over an applied mq patch'),
2427 _('cannot commit over an applied mq patch'),
2428 force)
2428 force)
2429
2429
2430 return super(mqrepo, self).commit(*args, **opts)
2430 return super(mqrepo, self).commit(*args, **opts)
2431
2431
2432 def push(self, remote, force=False, revs=None):
2432 def push(self, remote, force=False, revs=None):
2433 if self.mq.applied and not force and not revs:
2433 if self.mq.applied and not force and not revs:
2434 raise util.Abort(_('source has mq patches applied'))
2434 raise util.Abort(_('source has mq patches applied'))
2435 return super(mqrepo, self).push(remote, force, revs)
2435 return super(mqrepo, self).push(remote, force, revs)
2436
2436
2437 def tags(self):
2437 def tags(self):
2438 if self.tagscache:
2438 if self.tagscache:
2439 return self.tagscache
2439 return self.tagscache
2440
2440
2441 tagscache = super(mqrepo, self).tags()
2441 tagscache = super(mqrepo, self).tags()
2442
2442
2443 q = self.mq
2443 q = self.mq
2444 if not q.applied:
2444 if not q.applied:
2445 return tagscache
2445 return tagscache
2446
2446
2447 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2447 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2448
2448
2449 if mqtags[-1][0] not in self.changelog.nodemap:
2449 if mqtags[-1][0] not in self.changelog.nodemap:
2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2451 % short(mqtags[-1][0]))
2451 % short(mqtags[-1][0]))
2452 return tagscache
2452 return tagscache
2453
2453
2454 mqtags.append((mqtags[-1][0], 'qtip'))
2454 mqtags.append((mqtags[-1][0], 'qtip'))
2455 mqtags.append((mqtags[0][0], 'qbase'))
2455 mqtags.append((mqtags[0][0], 'qbase'))
2456 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2456 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2457 for patch in mqtags:
2457 for patch in mqtags:
2458 if patch[1] in tagscache:
2458 if patch[1] in tagscache:
2459 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2459 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2460 % patch[1])
2460 % patch[1])
2461 else:
2461 else:
2462 tagscache[patch[1]] = patch[0]
2462 tagscache[patch[1]] = patch[0]
2463
2463
2464 return tagscache
2464 return tagscache
2465
2465
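The tags() override above synthesizes three extra tags from the applied stack: qtip (last applied patch), qbase (first applied patch), and qparent (first parent of qbase). A toy restatement with placeholder node ids:

    def synth_mq_tags(applied, parent_of):
        # applied: ordered (node, patch name) pairs; parent_of: node -> parent
        tags = list(applied)
        tags.append((applied[-1][0], 'qtip'))
        tags.append((applied[0][0], 'qbase'))
        tags.append((parent_of(applied[0][0]), 'qparent'))
        return tags

    applied = [('n1', 'fix-io.patch'), ('n2', 'feature-x.patch')]
    print synth_mq_tags(applied, {'n1': 'n0'}.get)
    # ... plus ('n2', 'qtip'), ('n1', 'qbase'), ('n0', 'qparent')
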
2466 def _branchtags(self, partial, lrev):
2466 def _branchtags(self, partial, lrev):
2467 q = self.mq
2467 q = self.mq
2468 if not q.applied:
2468 if not q.applied:
2469 return super(mqrepo, self)._branchtags(partial, lrev)
2469 return super(mqrepo, self)._branchtags(partial, lrev)
2470
2470
2471 cl = self.changelog
2471 cl = self.changelog
2472 qbasenode = bin(q.applied[0].rev)
2472 qbasenode = bin(q.applied[0].rev)
2473 if qbasenode not in cl.nodemap:
2473 if qbasenode not in cl.nodemap:
2474 self.ui.warn(_('mq status file refers to unknown node %s\n')
2474 self.ui.warn(_('mq status file refers to unknown node %s\n')
2475 % short(qbasenode))
2475 % short(qbasenode))
2476 return super(mqrepo, self)._branchtags(partial, lrev)
2476 return super(mqrepo, self)._branchtags(partial, lrev)
2477
2477
2478 qbase = cl.rev(qbasenode)
2478 qbase = cl.rev(qbasenode)
2479 start = lrev + 1
2479 start = lrev + 1
2480 if start < qbase:
2480 if start < qbase:
2481 # update the cache (excluding the patches) and save it
2481 # update the cache (excluding the patches) and save it
2482 self._updatebranchcache(partial, lrev+1, qbase)
2482 self._updatebranchcache(partial, lrev+1, qbase)
2483 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2483 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2484 start = qbase
2484 start = qbase
2485 # if start == qbase, the cache is as updated as it should be.
2485 # if start == qbase, the cache is as updated as it should be.
2486 # if start > qbase, the cache includes (part of) the patches.
2486 # if start > qbase, the cache includes (part of) the patches.
2487 # we might as well use it, but we won't save it.
2487 # we might as well use it, but we won't save it.
2488
2488
2489 # update the cache up to the tip
2489 # update the cache up to the tip
2490 self._updatebranchcache(partial, start, len(cl))
2490 self._updatebranchcache(partial, start, len(cl))
2491
2491
2492 return partial
2492 return partial
2493
2493
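_branchtags() above clamps the branch-cache window: revisions below qbase are safe to persist to disk, while revisions belonging to applied patches may be used in memory but are never written out. A condensed sketch of that decision; branchcache_plan is an illustrative helper:

    def branchcache_plan(lrev, qbase):
        """Return (persist_upto, scan_start): revisions up to persist_upto
        may go to the on-disk cache; scanning resumes at scan_start."""
        start = lrev + 1
        if start < qbase:
            # cache lags behind the patches: persist up to qbase-1 first
            return qbase - 1, qbase
        # cache already covers (part of) the patches: usable, never saved
        return None, start

    print branchcache_plan(lrev=10, qbase=20)   # (19, 20)
    print branchcache_plan(lrev=22, qbase=20)   # (None, 23)
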
2494 if repo.local():
2494 if repo.local():
2495 repo.__class__ = mqrepo
2495 repo.__class__ = mqrepo
2496
2496
2497 def mqimport(orig, ui, repo, *args, **kwargs):
2497 def mqimport(orig, ui, repo, *args, **kwargs):
2498 if hasattr(repo, 'abort_if_wdir_patched'):
2498 if hasattr(repo, 'abort_if_wdir_patched'):
2499 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2499 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2500 kwargs.get('force'))
2500 kwargs.get('force'))
2501 return orig(ui, repo, *args, **kwargs)
2501 return orig(ui, repo, *args, **kwargs)
2502
2502
2503 def uisetup(ui):
2503 def uisetup(ui):
2504 extensions.wrapcommand(commands.table, 'import', mqimport)
2504 extensions.wrapcommand(commands.table, 'import', mqimport)
2505
2505
2506 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2506 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2507
2507
2508 cmdtable = {
2508 cmdtable = {
2509 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2509 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2510 "qclone":
2510 "qclone":
2511 (clone,
2511 (clone,
2512 [('', 'pull', None, _('use pull protocol to copy metadata')),
2512 [('', 'pull', None, _('use pull protocol to copy metadata')),
2513 ('U', 'noupdate', None, _('do not update the new working directories')),
2513 ('U', 'noupdate', None, _('do not update the new working directories')),
2514 ('', 'uncompressed', None,
2514 ('', 'uncompressed', None,
2515 _('use uncompressed transfer (fast over LAN)')),
2515 _('use uncompressed transfer (fast over LAN)')),
2516 ('p', 'patches', '', _('location of source patch repository')),
2516 ('p', 'patches', '', _('location of source patch repository')),
2517 ] + commands.remoteopts,
2517 ] + commands.remoteopts,
2518 _('hg qclone [OPTION]... SOURCE [DEST]')),
2518 _('hg qclone [OPTION]... SOURCE [DEST]')),
2519 "qcommit|qci":
2519 "qcommit|qci":
2520 (commit,
2520 (commit,
2521 commands.table["^commit|ci"][1],
2521 commands.table["^commit|ci"][1],
2522 _('hg qcommit [OPTION]... [FILE]...')),
2522 _('hg qcommit [OPTION]... [FILE]...')),
2523 "^qdiff":
2523 "^qdiff":
2524 (diff,
2524 (diff,
2525 commands.diffopts + commands.diffopts2 + commands.walkopts,
2525 commands.diffopts + commands.diffopts2 + commands.walkopts,
2526 _('hg qdiff [OPTION]... [FILE]...')),
2526 _('hg qdiff [OPTION]... [FILE]...')),
2527 "qdelete|qremove|qrm":
2527 "qdelete|qremove|qrm":
2528 (delete,
2528 (delete,
2529 [('k', 'keep', None, _('keep patch file')),
2529 [('k', 'keep', None, _('keep patch file')),
2530 ('r', 'rev', [], _('stop managing a revision'))],
2530 ('r', 'rev', [], _('stop managing a revision'))],
2531 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2531 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2532 'qfold':
2532 'qfold':
2533 (fold,
2533 (fold,
2534 [('e', 'edit', None, _('edit patch header')),
2534 [('e', 'edit', None, _('edit patch header')),
2535 ('k', 'keep', None, _('keep folded patch files')),
2535 ('k', 'keep', None, _('keep folded patch files')),
2536 ] + commands.commitopts,
2536 ] + commands.commitopts,
2537 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2537 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2538 'qgoto':
2538 'qgoto':
2539 (goto,
2539 (goto,
2540 [('f', 'force', None, _('overwrite any local changes'))],
2540 [('f', 'force', None, _('overwrite any local changes'))],
2541 _('hg qgoto [OPTION]... PATCH')),
2541 _('hg qgoto [OPTION]... PATCH')),
2542 'qguard':
2542 'qguard':
2543 (guard,
2543 (guard,
2544 [('l', 'list', None, _('list all patches and guards')),
2544 [('l', 'list', None, _('list all patches and guards')),
2545 ('n', 'none', None, _('drop all guards'))],
2545 ('n', 'none', None, _('drop all guards'))],
2546 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2546 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2547 'qheader': (header, [], _('hg qheader [PATCH]')),
2547 'qheader': (header, [], _('hg qheader [PATCH]')),
2548 "^qimport":
2548 "^qimport":
2549 (qimport,
2549 (qimport,
2550 [('e', 'existing', None, _('import file in patch directory')),
2550 [('e', 'existing', None, _('import file in patch directory')),
2551 ('n', 'name', '', _('patch file name')),
2551 ('n', 'name', '', _('patch file name')),
2552 ('f', 'force', None, _('overwrite existing files')),
2552 ('f', 'force', None, _('overwrite existing files')),
2553 ('r', 'rev', [], _('place existing revisions under mq control')),
2553 ('r', 'rev', [], _('place existing revisions under mq control')),
2554 ('g', 'git', None, _('use git extended diff format')),
2554 ('g', 'git', None, _('use git extended diff format')),
2555 ('P', 'push', None, _('qpush after importing'))],
2555 ('P', 'push', None, _('qpush after importing'))],
2556 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2556 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2557 "^qinit":
2557 "^qinit":
2558 (init,
2558 (init,
2559 [('c', 'create-repo', None, _('create queue repository'))],
2559 [('c', 'create-repo', None, _('create queue repository'))],
2560 _('hg qinit [-c]')),
2560 _('hg qinit [-c]')),
2561 "qnew":
2561 "qnew":
2562 (new,
2562 (new,
2563 [('e', 'edit', None, _('edit commit message')),
2563 [('e', 'edit', None, _('edit commit message')),
2564 ('f', 'force', None, _('import uncommitted changes into patch')),
2564 ('f', 'force', None, _('import uncommitted changes into patch')),
2565 ('g', 'git', None, _('use git extended diff format')),
2565 ('g', 'git', None, _('use git extended diff format')),
2566 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2566 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2567 ('u', 'user', '', _('add "From: <given user>" to patch')),
2567 ('u', 'user', '', _('add "From: <given user>" to patch')),
2568 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2568 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2569 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2569 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2570 ] + commands.walkopts + commands.commitopts,
2570 ] + commands.walkopts + commands.commitopts,
2571 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2571 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2572 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2572 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2573 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2573 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2574 "^qpop":
2574 "^qpop":
2575 (pop,
2575 (pop,
2576 [('a', 'all', None, _('pop all patches')),
2576 [('a', 'all', None, _('pop all patches')),
2577 ('n', 'name', '', _('queue name to pop')),
2577 ('n', 'name', '', _('queue name to pop')),
2578 ('f', 'force', None, _('forget any local changes'))],
2578 ('f', 'force', None, _('forget any local changes'))],
2579 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2579 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2580 "^qpush":
2580 "^qpush":
2581 (push,
2581 (push,
2582 [('f', 'force', None, _('apply if the patch has rejects')),
2582 [('f', 'force', None, _('apply if the patch has rejects')),
2583 ('l', 'list', None, _('list patch name in commit text')),
2583 ('l', 'list', None, _('list patch name in commit text')),
2584 ('a', 'all', None, _('apply all patches')),
2584 ('a', 'all', None, _('apply all patches')),
2585 ('m', 'merge', None, _('merge from another queue')),
2585 ('m', 'merge', None, _('merge from another queue')),
2586 ('n', 'name', '', _('merge queue name'))],
2586 ('n', 'name', '', _('merge queue name'))],
2587 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2587 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2588 "^qrefresh":
2588 "^qrefresh":
2589 (refresh,
2589 (refresh,
2590 [('e', 'edit', None, _('edit commit message')),
2590 [('e', 'edit', None, _('edit commit message')),
2591 ('g', 'git', None, _('use git extended diff format')),
2591 ('g', 'git', None, _('use git extended diff format')),
2592 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2592 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2593 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2593 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2594 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2594 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2595 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2595 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2596 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2596 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2597 ] + commands.walkopts + commands.commitopts,
2597 ] + commands.walkopts + commands.commitopts,
2598 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2598 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2599 'qrename|qmv':
2599 'qrename|qmv':
2600 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2600 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2601 "qrestore":
2601 "qrestore":
2602 (restore,
2602 (restore,
2603 [('d', 'delete', None, _('delete save entry')),
2603 [('d', 'delete', None, _('delete save entry')),
2604 ('u', 'update', None, _('update queue working directory'))],
2604 ('u', 'update', None, _('update queue working directory'))],
2605 _('hg qrestore [-d] [-u] REV')),
2605 _('hg qrestore [-d] [-u] REV')),
2606 "qsave":
2606 "qsave":
2607 (save,
2607 (save,
2608 [('c', 'copy', None, _('copy patch directory')),
2608 [('c', 'copy', None, _('copy patch directory')),
2609 ('n', 'name', '', _('copy directory name')),
2609 ('n', 'name', '', _('copy directory name')),
2610 ('e', 'empty', None, _('clear queue status file')),
2610 ('e', 'empty', None, _('clear queue status file')),
2611 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2611 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2612 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2612 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2613 "qselect":
2613 "qselect":
2614 (select,
2614 (select,
2615 [('n', 'none', None, _('disable all guards')),
2615 [('n', 'none', None, _('disable all guards')),
2616 ('s', 'series', None, _('list all guards in series file')),
2616 ('s', 'series', None, _('list all guards in series file')),
2617 ('', 'pop', None, _('pop to before first guarded applied patch')),
2617 ('', 'pop', None, _('pop to before first guarded applied patch')),
2618 ('', 'reapply', None, _('pop, then reapply patches'))],
2618 ('', 'reapply', None, _('pop, then reapply patches'))],
2619 _('hg qselect [OPTION]... [GUARD]...')),
2619 _('hg qselect [OPTION]... [GUARD]...')),
2620 "qseries":
2620 "qseries":
2621 (series,
2621 (series,
2622 [('m', 'missing', None, _('print patches not in series')),
2622 [('m', 'missing', None, _('print patches not in series')),
2623 ] + seriesopts,
2623 ] + seriesopts,
2624 _('hg qseries [-ms]')),
2624 _('hg qseries [-ms]')),
2625 "^strip":
2625 "^strip":
2626 (strip,
2626 (strip,
2627 [('f', 'force', None, _('force removal with local changes')),
2627 [('f', 'force', None, _('force removal with local changes')),
2628 ('b', 'backup', None, _('bundle unrelated changesets')),
2628 ('b', 'backup', None, _('bundle unrelated changesets')),
2629 ('n', 'nobackup', None, _('no backups'))],
2629 ('n', 'nobackup', None, _('no backups'))],
2630 _('hg strip [-f] [-b] [-n] REV')),
2630 _('hg strip [-f] [-b] [-n] REV')),
2631 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2631 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2632 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2632 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2633 "qfinish":
2633 "qfinish":
2634 (finish,
2634 (finish,
2635 [('a', 'applied', None, _('finish all applied changesets'))],
2635 [('a', 'applied', None, _('finish all applied changesets'))],
2636 _('hg qfinish [-a] [REV...]')),
2636 _('hg qfinish [-a] [REV...]')),
2637 }
2637 }
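Every entry in the table above follows the (function, options, synopsis) shape used by Mercurial command tables, each option being a (short, long, default, help) tuple. An illustrative consumer, assuming the cmdtable above is in scope:

    def describe(table, name):
        func, opts, synopsis = table[name]
        longs = ', '.join('--%s' % o[1] for o in opts)
        return '%s  (options: %s)' % (synopsis, longs or 'none')

    print describe(cmdtable, 'qfinish')
    # hg qfinish [-a] [REV...]  (options: --applied)
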
@@ -1,226 +1,226 @@
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex
9 from node import hex
10 import util
10 import util
11 import cStringIO, os, stat, tarfile, time, zipfile
11 import cStringIO, os, stat, tarfile, time, zipfile
12 import zlib, gzip
12 import zlib, gzip
13
13
14 def tidyprefix(dest, prefix, suffixes):
14 def tidyprefix(dest, prefix, suffixes):
15 '''choose prefix to use for names in archive. make sure prefix is
15 '''choose prefix to use for names in archive. make sure prefix is
16 safe for consumers.'''
16 safe for consumers.'''
17
17
18 if prefix:
18 if prefix:
19 prefix = util.normpath(prefix)
19 prefix = util.normpath(prefix)
20 else:
20 else:
21 if not isinstance(dest, str):
21 if not isinstance(dest, str):
22 raise ValueError('dest must be string if no prefix')
22 raise ValueError('dest must be string if no prefix')
23 prefix = os.path.basename(dest)
23 prefix = os.path.basename(dest)
24 lower = prefix.lower()
24 lower = prefix.lower()
25 for sfx in suffixes:
25 for sfx in suffixes:
26 if lower.endswith(sfx):
26 if lower.endswith(sfx):
27 prefix = prefix[:-len(sfx)]
27 prefix = prefix[:-len(sfx)]
28 break
28 break
29 lpfx = os.path.normpath(util.localpath(prefix))
29 lpfx = os.path.normpath(util.localpath(prefix))
30 prefix = util.pconvert(lpfx)
30 prefix = util.pconvert(lpfx)
31 if not prefix.endswith('/'):
31 if not prefix.endswith('/'):
32 prefix += '/'
32 prefix += '/'
33 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
33 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
34 raise util.Abort(_('archive prefix contains illegal components'))
34 raise util.Abort(_('archive prefix contains illegal components'))
35 return prefix
35 return prefix
36
36
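For a feel of tidyprefix() above, two illustrative calls, assuming the mercurial package is importable (e.g. from a source checkout) and POSIX path conventions:

    from mercurial import archival

    print archival.tidyprefix('proj-1.0.tar.gz', '', ['.tar.gz'])  # 'proj-1.0/'
    print archival.tidyprefix(None, 'a//b', [])                    # 'a/b/'
    # '../evil' or an absolute prefix raises util.Abort
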
37 class tarit:
37 class tarit:
38 '''write archive to tar file or stream. can write uncompressed,
38 '''write archive to tar file or stream. can write uncompressed,
39 or compress with gzip or bzip2.'''
39 or compress with gzip or bzip2.'''
40
40
41 class GzipFileWithTime(gzip.GzipFile):
41 class GzipFileWithTime(gzip.GzipFile):
42
42
43 def __init__(self, *args, **kw):
43 def __init__(self, *args, **kw):
44 timestamp = None
44 timestamp = None
45 if 'timestamp' in kw:
45 if 'timestamp' in kw:
46 timestamp = kw.pop('timestamp')
46 timestamp = kw.pop('timestamp')
47 if timestamp == None:
47 if timestamp is None:
48 self.timestamp = time.time()
48 self.timestamp = time.time()
49 else:
49 else:
50 self.timestamp = timestamp
50 self.timestamp = timestamp
51 gzip.GzipFile.__init__(self, *args, **kw)
51 gzip.GzipFile.__init__(self, *args, **kw)
52
52
53 def _write_gzip_header(self):
53 def _write_gzip_header(self):
54 self.fileobj.write('\037\213') # magic header
54 self.fileobj.write('\037\213') # magic header
55 self.fileobj.write('\010') # compression method
55 self.fileobj.write('\010') # compression method
56 # Python 2.6 deprecates self.filename
56 # Python 2.6 deprecates self.filename
57 fname = getattr(self, 'name', None) or self.filename
57 fname = getattr(self, 'name', None) or self.filename
58 flags = 0
58 flags = 0
59 if fname:
59 if fname:
60 flags = gzip.FNAME
60 flags = gzip.FNAME
61 self.fileobj.write(chr(flags))
61 self.fileobj.write(chr(flags))
62 gzip.write32u(self.fileobj, long(self.timestamp))
62 gzip.write32u(self.fileobj, long(self.timestamp))
63 self.fileobj.write('\002')
63 self.fileobj.write('\002')
64 self.fileobj.write('\377')
64 self.fileobj.write('\377')
65 if fname:
65 if fname:
66 self.fileobj.write(fname + '\000')
66 self.fileobj.write(fname + '\000')
67
67
68 def __init__(self, dest, prefix, mtime, kind=''):
68 def __init__(self, dest, prefix, mtime, kind=''):
69 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
69 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
70 '.tgz', '.tbz2'])
70 '.tgz', '.tbz2'])
71 self.mtime = mtime
71 self.mtime = mtime
72
72
73 def taropen(name, mode, fileobj=None):
73 def taropen(name, mode, fileobj=None):
74 if kind == 'gz':
74 if kind == 'gz':
75 mode = mode[0]
75 mode = mode[0]
76 if not fileobj:
76 if not fileobj:
77 fileobj = open(name, mode + 'b')
77 fileobj = open(name, mode + 'b')
78 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
78 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
79 zlib.Z_BEST_COMPRESSION,
79 zlib.Z_BEST_COMPRESSION,
80 fileobj, timestamp=mtime)
80 fileobj, timestamp=mtime)
81 return tarfile.TarFile.taropen(name, mode, gzfileobj)
81 return tarfile.TarFile.taropen(name, mode, gzfileobj)
82 else:
82 else:
83 return tarfile.open(name, mode + kind, fileobj)
83 return tarfile.open(name, mode + kind, fileobj)
84
84
85 if isinstance(dest, str):
85 if isinstance(dest, str):
86 self.z = taropen(dest, mode='w:')
86 self.z = taropen(dest, mode='w:')
87 else:
87 else:
88 # Python 2.5-2.5.1 have a regression that requires a name arg
88 # Python 2.5-2.5.1 have a regression that requires a name arg
89 self.z = taropen(name='', mode='w|', fileobj=dest)
89 self.z = taropen(name='', mode='w|', fileobj=dest)
90
90
91 def addfile(self, name, mode, islink, data):
91 def addfile(self, name, mode, islink, data):
92 i = tarfile.TarInfo(self.prefix + name)
92 i = tarfile.TarInfo(self.prefix + name)
93 i.mtime = self.mtime
93 i.mtime = self.mtime
94 i.size = len(data)
94 i.size = len(data)
95 if islink:
95 if islink:
96 i.type = tarfile.SYMTYPE
96 i.type = tarfile.SYMTYPE
97 i.mode = 0777
97 i.mode = 0777
98 i.linkname = data
98 i.linkname = data
99 data = None
99 data = None
100 i.size = 0
100 i.size = 0
101 else:
101 else:
102 i.mode = mode
102 i.mode = mode
103 data = cStringIO.StringIO(data)
103 data = cStringIO.StringIO(data)
104 self.z.addfile(i, data)
104 self.z.addfile(i, data)
105
105
106 def done(self):
106 def done(self):
107 self.z.close()
107 self.z.close()
108
108
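GzipFileWithTime above pins the timestamp written into the gzip header, so archiving the same revision twice yields byte-identical output; Python 2.7+ exposes the same knob as GzipFile's mtime argument. A minimal demonstration of the reproducibility property using that stdlib argument:

    import gzip, StringIO  # Python 2.7; on Python 3 use io.BytesIO

    def gz_bytes(data, mtime):
        buf = StringIO.StringIO()
        f = gzip.GzipFile(fileobj=buf, mode='wb', mtime=mtime)
        f.write(data)
        f.close()
        return buf.getvalue()

    # same input and same mtime -> identical archive bytes
    assert gz_bytes('payload', 0) == gz_bytes('payload', 0)
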
109 class tellable:
109 class tellable:
110 '''provide tell method for zipfile.ZipFile when writing to http
110 '''provide tell method for zipfile.ZipFile when writing to http
111 response file object.'''
111 response file object.'''
112
112
113 def __init__(self, fp):
113 def __init__(self, fp):
114 self.fp = fp
114 self.fp = fp
115 self.offset = 0
115 self.offset = 0
116
116
117 def __getattr__(self, key):
117 def __getattr__(self, key):
118 return getattr(self.fp, key)
118 return getattr(self.fp, key)
119
119
120 def write(self, s):
120 def write(self, s):
121 self.fp.write(s)
121 self.fp.write(s)
122 self.offset += len(s)
122 self.offset += len(s)
123
123
124 def tell(self):
124 def tell(self):
125 return self.offset
125 return self.offset
126
126
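tellable above is a small adapter: it counts bytes written so that zipfile can call tell() on a stream that cannot seek. A usage sketch with a hypothetical non-seekable writer:

    class StreamStub(object):
        # stand-in for an HTTP response object without seek()/tell()
        def write(self, s):
            pass

    w = tellable(StreamStub())
    w.write('abcd')
    w.write('ef')
    print w.tell()  # 6 -- offset tracked by counting written bytes
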
127 class zipit:
127 class zipit:
128 '''write archive to zip file or stream. can write uncompressed,
128 '''write archive to zip file or stream. can write uncompressed,
129 or compressed with deflate.'''
129 or compressed with deflate.'''
130
130
131 def __init__(self, dest, prefix, mtime, compress=True):
131 def __init__(self, dest, prefix, mtime, compress=True):
132 self.prefix = tidyprefix(dest, prefix, ('.zip',))
132 self.prefix = tidyprefix(dest, prefix, ('.zip',))
133 if not isinstance(dest, str):
133 if not isinstance(dest, str):
134 try:
134 try:
135 dest.tell()
135 dest.tell()
136 except (AttributeError, IOError):
136 except (AttributeError, IOError):
137 dest = tellable(dest)
137 dest = tellable(dest)
138 self.z = zipfile.ZipFile(dest, 'w',
138 self.z = zipfile.ZipFile(dest, 'w',
139 compress and zipfile.ZIP_DEFLATED or
139 compress and zipfile.ZIP_DEFLATED or
140 zipfile.ZIP_STORED)
140 zipfile.ZIP_STORED)
141 self.date_time = time.gmtime(mtime)[:6]
141 self.date_time = time.gmtime(mtime)[:6]
142
142
143 def addfile(self, name, mode, islink, data):
143 def addfile(self, name, mode, islink, data):
144 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
144 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
145 i.compress_type = self.z.compression
145 i.compress_type = self.z.compression
146 # unzip will not honor unix file modes unless file creator is
146 # unzip will not honor unix file modes unless file creator is
147 # set to unix (id 3).
147 # set to unix (id 3).
148 i.create_system = 3
148 i.create_system = 3
149 ftype = stat.S_IFREG
149 ftype = stat.S_IFREG
150 if islink:
150 if islink:
151 mode = 0777
151 mode = 0777
152 ftype = stat.S_IFLNK
152 ftype = stat.S_IFLNK
153 i.external_attr = (mode | ftype) << 16L
153 i.external_attr = (mode | ftype) << 16L
154 self.z.writestr(i, data)
154 self.z.writestr(i, data)
155
155
156 def done(self):
156 def done(self):
157 self.z.close()
157 self.z.close()
158
158
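The external_attr line above packs the POSIX file type and mode into the high 16 bits of the ZIP external-attributes field, which unzip honors once create_system is set to 3 (unix). A quick round-trip check:

    import stat

    mode = 0755                                   # Python 2 octal literal
    external_attr = (mode | stat.S_IFREG) << 16L  # as in addfile() above
    print oct((external_attr >> 16) & 07777)      # 0755 -- mode round-trips
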
159 class fileit:
159 class fileit:
160 '''write archive as files in directory.'''
160 '''write archive as files in directory.'''
161
161
162 def __init__(self, name, prefix, mtime):
162 def __init__(self, name, prefix, mtime):
163 if prefix:
163 if prefix:
164 raise util.Abort(_('cannot give prefix when archiving to files'))
164 raise util.Abort(_('cannot give prefix when archiving to files'))
165 self.basedir = name
165 self.basedir = name
166 self.opener = util.opener(self.basedir)
166 self.opener = util.opener(self.basedir)
167
167
168 def addfile(self, name, mode, islink, data):
168 def addfile(self, name, mode, islink, data):
169 if islink:
169 if islink:
170 self.opener.symlink(data, name)
170 self.opener.symlink(data, name)
171 return
171 return
172 f = self.opener(name, "w", atomictemp=True)
172 f = self.opener(name, "w", atomictemp=True)
173 f.write(data)
173 f.write(data)
174 f.rename()
174 f.rename()
175 destfile = os.path.join(self.basedir, name)
175 destfile = os.path.join(self.basedir, name)
176 os.chmod(destfile, mode)
176 os.chmod(destfile, mode)
177
177
178 def done(self):
178 def done(self):
179 pass
179 pass
180
180
181 archivers = {
181 archivers = {
182 'files': fileit,
182 'files': fileit,
183 'tar': tarit,
183 'tar': tarit,
184 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
184 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
185 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
185 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
186 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
186 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
187 'zip': zipit,
187 'zip': zipit,
188 }
188 }
189
189
190 def archive(repo, dest, node, kind, decode=True, matchfn=None,
190 def archive(repo, dest, node, kind, decode=True, matchfn=None,
191 prefix=None, mtime=None):
191 prefix=None, mtime=None):
192 '''create archive of repo as it was at node.
192 '''create archive of repo as it was at node.
193
193
194 dest can be name of directory, name of archive file, or file
194 dest can be name of directory, name of archive file, or file
195 object to write archive to.
195 object to write archive to.
196
196
197 kind is type of archive to create.
197 kind is type of archive to create.
198
198
199 decode tells whether to put files through decode filters from
199 decode tells whether to put files through decode filters from
200 hgrc.
200 hgrc.
201
201
202 matchfn is function to filter names of files to write to archive.
202 matchfn is function to filter names of files to write to archive.
203
203
204 prefix is name of path to put before every archive member.'''
204 prefix is name of path to put before every archive member.'''
205
205
206 def write(name, mode, islink, getdata):
206 def write(name, mode, islink, getdata):
207 if matchfn and not matchfn(name): return
207 if matchfn and not matchfn(name): return
208 data = getdata()
208 data = getdata()
209 if decode:
209 if decode:
210 data = repo.wwritedata(name, data)
210 data = repo.wwritedata(name, data)
211 archiver.addfile(name, mode, islink, data)
211 archiver.addfile(name, mode, islink, data)
212
212
213 if kind not in archivers:
213 if kind not in archivers:
214 raise util.Abort(_("unknown archive type '%s'") % kind)
214 raise util.Abort(_("unknown archive type '%s'") % kind)
215
215
216 ctx = repo[node]
216 ctx = repo[node]
217 archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
217 archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
218
218
219 if repo.ui.configbool("ui", "archivemeta", True):
219 if repo.ui.configbool("ui", "archivemeta", True):
220 write('.hg_archival.txt', 0644, False,
220 write('.hg_archival.txt', 0644, False,
221 lambda: 'repo: %s\nnode: %s\n' % (
221 lambda: 'repo: %s\nnode: %s\n' % (
222 hex(repo.changelog.node(0)), hex(node)))
222 hex(repo.changelog.node(0)), hex(node)))
223 for f in ctx:
223 for f in ctx:
224 ff = ctx.flags(f)
224 ff = ctx.flags(f)
225 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
225 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
226 archiver.done()
226 archiver.done()
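A hedged usage sketch for archive() above; the repository path, destination file name, and revision are placeholders:

    from mercurial import hg, ui as uimod, archival

    repo = hg.repository(uimod.ui(), '.')
    archival.archive(repo, 'snapshot.tar.gz', repo.lookup('tip'), 'tgz',
                     prefix='snapshot/')  # writes snapshot.tar.gz
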
@@ -1,814 +1,814 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util
10 import ancestor, bdiff, error, util
11 import os, errno
11 import os, errno
12
12
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14
14
15 class changectx(object):
15 class changectx(object):
16 """A changecontext object makes access to data related to a particular
16 """A changecontext object makes access to data related to a particular
17 changeset convenient."""
17 changeset convenient."""
18 def __init__(self, repo, changeid=''):
18 def __init__(self, repo, changeid=''):
19 """changeid is a revision number, node, or tag"""
19 """changeid is a revision number, node, or tag"""
20 if changeid == '':
20 if changeid == '':
21 changeid = '.'
21 changeid = '.'
22 self._repo = repo
22 self._repo = repo
23 if isinstance(changeid, (long, int)):
23 if isinstance(changeid, (long, int)):
24 self._rev = changeid
24 self._rev = changeid
25 self._node = self._repo.changelog.node(changeid)
25 self._node = self._repo.changelog.node(changeid)
26 else:
26 else:
27 self._node = self._repo.lookup(changeid)
27 self._node = self._repo.lookup(changeid)
28 self._rev = self._repo.changelog.rev(self._node)
28 self._rev = self._repo.changelog.rev(self._node)
29
29
30 def __str__(self):
30 def __str__(self):
31 return short(self.node())
31 return short(self.node())
32
32
33 def __int__(self):
33 def __int__(self):
34 return self.rev()
34 return self.rev()
35
35
36 def __repr__(self):
36 def __repr__(self):
37 return "<changectx %s>" % str(self)
37 return "<changectx %s>" % str(self)
38
38
39 def __hash__(self):
39 def __hash__(self):
40 try:
40 try:
41 return hash(self._rev)
41 return hash(self._rev)
42 except AttributeError:
42 except AttributeError:
43 return id(self)
43 return id(self)
44
44
45 def __eq__(self, other):
45 def __eq__(self, other):
46 try:
46 try:
47 return self._rev == other._rev
47 return self._rev == other._rev
48 except AttributeError:
48 except AttributeError:
49 return False
49 return False
50
50
51 def __ne__(self, other):
51 def __ne__(self, other):
52 return not (self == other)
52 return not (self == other)
53
53
54 def __nonzero__(self):
54 def __nonzero__(self):
55 return self._rev != nullrev
55 return self._rev != nullrev
56
56
57 @propertycache
57 @propertycache
58 def _changeset(self):
58 def _changeset(self):
59 return self._repo.changelog.read(self.node())
59 return self._repo.changelog.read(self.node())
60
60
61 @propertycache
61 @propertycache
62 def _manifest(self):
62 def _manifest(self):
63 return self._repo.manifest.read(self._changeset[0])
63 return self._repo.manifest.read(self._changeset[0])
64
64
65 @propertycache
65 @propertycache
66 def _manifestdelta(self):
66 def _manifestdelta(self):
67 return self._repo.manifest.readdelta(self._changeset[0])
67 return self._repo.manifest.readdelta(self._changeset[0])
68
68
69 @propertycache
69 @propertycache
70 def _parents(self):
70 def _parents(self):
71 p = self._repo.changelog.parentrevs(self._rev)
71 p = self._repo.changelog.parentrevs(self._rev)
72 if p[1] == nullrev:
72 if p[1] == nullrev:
73 p = p[:-1]
73 p = p[:-1]
74 return [changectx(self._repo, x) for x in p]
74 return [changectx(self._repo, x) for x in p]
75
75
76 def __contains__(self, key):
76 def __contains__(self, key):
77 return key in self._manifest
77 return key in self._manifest
78
78
79 def __getitem__(self, key):
79 def __getitem__(self, key):
80 return self.filectx(key)
80 return self.filectx(key)
81
81
82 def __iter__(self):
82 def __iter__(self):
83 for f in sorted(self._manifest):
83 for f in sorted(self._manifest):
84 yield f
84 yield f
85
85
86 def changeset(self): return self._changeset
86 def changeset(self): return self._changeset
87 def manifest(self): return self._manifest
87 def manifest(self): return self._manifest
88 def manifestnode(self): return self._changeset[0]
88 def manifestnode(self): return self._changeset[0]
89
89
90 def rev(self): return self._rev
90 def rev(self): return self._rev
91 def node(self): return self._node
91 def node(self): return self._node
92 def hex(self): return hex(self._node)
92 def hex(self): return hex(self._node)
93 def user(self): return self._changeset[1]
93 def user(self): return self._changeset[1]
94 def date(self): return self._changeset[2]
94 def date(self): return self._changeset[2]
95 def files(self): return self._changeset[3]
95 def files(self): return self._changeset[3]
96 def description(self): return self._changeset[4]
96 def description(self): return self._changeset[4]
97 def branch(self): return self._changeset[5].get("branch")
97 def branch(self): return self._changeset[5].get("branch")
98 def extra(self): return self._changeset[5]
98 def extra(self): return self._changeset[5]
99 def tags(self): return self._repo.nodetags(self._node)
99 def tags(self): return self._repo.nodetags(self._node)
100
100
101 def parents(self):
101 def parents(self):
102 """return contexts for each parent changeset"""
102 """return contexts for each parent changeset"""
103 return self._parents
103 return self._parents
104
104
105 def p1(self):
105 def p1(self):
106 return self._parents[0]
106 return self._parents[0]
107
107
108 def p2(self):
108 def p2(self):
109 if len(self._parents) == 2:
109 if len(self._parents) == 2:
110 return self._parents[1]
110 return self._parents[1]
111 return changectx(self._repo, -1)
111 return changectx(self._repo, -1)
112
112
113 def children(self):
113 def children(self):
114 """return contexts for each child changeset"""
114 """return contexts for each child changeset"""
115 c = self._repo.changelog.children(self._node)
115 c = self._repo.changelog.children(self._node)
116 return [changectx(self._repo, x) for x in c]
116 return [changectx(self._repo, x) for x in c]
117
117
118 def ancestors(self):
118 def ancestors(self):
119 for a in self._repo.changelog.ancestors(self._rev):
119 for a in self._repo.changelog.ancestors(self._rev):
120 yield changectx(self._repo, a)
120 yield changectx(self._repo, a)
121
121
122 def descendants(self):
122 def descendants(self):
123 for d in self._repo.changelog.descendants(self._rev):
123 for d in self._repo.changelog.descendants(self._rev):
124 yield changectx(self._repo, d)
124 yield changectx(self._repo, d)
125
125
126 def _fileinfo(self, path):
126 def _fileinfo(self, path):
127 if '_manifest' in self.__dict__:
127 if '_manifest' in self.__dict__:
128 try:
128 try:
129 return self._manifest[path], self._manifest.flags(path)
129 return self._manifest[path], self._manifest.flags(path)
130 except KeyError:
130 except KeyError:
131 raise error.LookupError(self._node, path,
131 raise error.LookupError(self._node, path,
132 _('not found in manifest'))
132 _('not found in manifest'))
133 if '_manifestdelta' in self.__dict__ or path in self.files():
133 if '_manifestdelta' in self.__dict__ or path in self.files():
134 if path in self._manifestdelta:
134 if path in self._manifestdelta:
135 return self._manifestdelta[path], self._manifestdelta.flags(path)
135 return self._manifestdelta[path], self._manifestdelta.flags(path)
136 node, flag = self._repo.manifest.find(self._changeset[0], path)
136 node, flag = self._repo.manifest.find(self._changeset[0], path)
137 if not node:
137 if not node:
138 raise error.LookupError(self._node, path,
138 raise error.LookupError(self._node, path,
139 _('not found in manifest'))
139 _('not found in manifest'))
140
140
141 return node, flag
141 return node, flag
142
142
143 def filenode(self, path):
143 def filenode(self, path):
144 return self._fileinfo(path)[0]
144 return self._fileinfo(path)[0]
145
145
146 def flags(self, path):
146 def flags(self, path):
147 try:
147 try:
148 return self._fileinfo(path)[1]
148 return self._fileinfo(path)[1]
149 except error.LookupError:
149 except error.LookupError:
150 return ''
150 return ''
151
151
152 def filectx(self, path, fileid=None, filelog=None):
152 def filectx(self, path, fileid=None, filelog=None):
153 """get a file context from this changeset"""
153 """get a file context from this changeset"""
154 if fileid is None:
154 if fileid is None:
155 fileid = self.filenode(path)
155 fileid = self.filenode(path)
156 return filectx(self._repo, path, fileid=fileid,
156 return filectx(self._repo, path, fileid=fileid,
157 changectx=self, filelog=filelog)
157 changectx=self, filelog=filelog)
158
158
159 def ancestor(self, c2):
159 def ancestor(self, c2):
160 """
160 """
161 return the ancestor context of self and c2
161 return the ancestor context of self and c2
162 """
162 """
163 n = self._repo.changelog.ancestor(self._node, c2._node)
163 n = self._repo.changelog.ancestor(self._node, c2._node)
164 return changectx(self._repo, n)
164 return changectx(self._repo, n)
165
165
166 def walk(self, match):
166 def walk(self, match):
167 fset = set(match.files())
167 fset = set(match.files())
168 # for dirstate.walk, files=['.'] means "walk the whole tree".
168 # for dirstate.walk, files=['.'] means "walk the whole tree".
169 # follow that here, too
169 # follow that here, too
170 fset.discard('.')
170 fset.discard('.')
171 for fn in self:
171 for fn in self:
172 for ffn in fset:
172 for ffn in fset:
173 # match if the file is the exact name or a directory
173 # match if the file is the exact name or a directory
174 if ffn == fn or fn.startswith("%s/" % ffn):
174 if ffn == fn or fn.startswith("%s/" % ffn):
175 fset.remove(ffn)
175 fset.remove(ffn)
176 break
176 break
177 if match(fn):
177 if match(fn):
178 yield fn
178 yield fn
179 for fn in sorted(fset):
179 for fn in sorted(fset):
180 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
180 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
181 yield fn
181 yield fn
182
182
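A brief usage sketch of the changectx API defined above; the repository path is a placeholder and mercurial must be importable:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')
    ctx = repo['tip']                      # localrepo hands back a changectx
    print ctx.rev(), ctx.branch(), ctx.user()
    for name in ctx:                       # iterates sorted manifest names
        print name, len(ctx[name].data())  # filectx lookup via __getitem__
        break
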
183 class filectx(object):
183 class filectx(object):
184 """A filecontext object makes access to data related to a particular
184 """A filecontext object makes access to data related to a particular
185 filerevision convenient."""
185 filerevision convenient."""
186 def __init__(self, repo, path, changeid=None, fileid=None,
186 def __init__(self, repo, path, changeid=None, fileid=None,
187 filelog=None, changectx=None):
187 filelog=None, changectx=None):
188 """changeid can be a changeset revision, node, or tag.
188 """changeid can be a changeset revision, node, or tag.
189 fileid can be a file revision or node."""
189 fileid can be a file revision or node."""
190 self._repo = repo
190 self._repo = repo
191 self._path = path
191 self._path = path
192
192
193 assert (changeid is not None
193 assert (changeid is not None
194 or fileid is not None
194 or fileid is not None
195 or changectx is not None)
195 or changectx is not None)
196
196
197 if filelog:
197 if filelog:
198 self._filelog = filelog
198 self._filelog = filelog
199
199
200 if changeid is not None:
200 if changeid is not None:
201 self._changeid = changeid
201 self._changeid = changeid
202 if changectx is not None:
202 if changectx is not None:
203 self._changectx = changectx
203 self._changectx = changectx
204 if fileid is not None:
204 if fileid is not None:
205 self._fileid = fileid
205 self._fileid = fileid
206
206
207 @propertycache
207 @propertycache
208 def _changectx(self):
208 def _changectx(self):
209 return changectx(self._repo, self._changeid)
209 return changectx(self._repo, self._changeid)
210
210
211 @propertycache
211 @propertycache
212 def _filelog(self):
212 def _filelog(self):
213 return self._repo.file(self._path)
213 return self._repo.file(self._path)
214
214
215 @propertycache
215 @propertycache
216 def _changeid(self):
216 def _changeid(self):
217 if '_changectx' in self.__dict__:
217 if '_changectx' in self.__dict__:
218 return self._changectx.rev()
218 return self._changectx.rev()
219 else:
219 else:
220 return self._filelog.linkrev(self._filerev)
220 return self._filelog.linkrev(self._filerev)
221
221
222 @propertycache
222 @propertycache
223 def _filenode(self):
223 def _filenode(self):
224 if '_fileid' in self.__dict__:
224 if '_fileid' in self.__dict__:
225 return self._filelog.lookup(self._fileid)
225 return self._filelog.lookup(self._fileid)
226 else:
226 else:
227 return self._changectx.filenode(self._path)
227 return self._changectx.filenode(self._path)
228
228
229 @propertycache
229 @propertycache
230 def _filerev(self):
230 def _filerev(self):
231 return self._filelog.rev(self._filenode)
231 return self._filelog.rev(self._filenode)
232
232
233 @propertycache
233 @propertycache
234 def _repopath(self):
234 def _repopath(self):
235 return self._path
235 return self._path
236
236
237 def __nonzero__(self):
237 def __nonzero__(self):
238 try:
238 try:
239 self._filenode
239 self._filenode
240 return True
240 return True
241 except error.LookupError:
241 except error.LookupError:
242 # file is missing
242 # file is missing
243 return False
243 return False
244
244
245 def __str__(self):
245 def __str__(self):
246 return "%s@%s" % (self.path(), short(self.node()))
246 return "%s@%s" % (self.path(), short(self.node()))
247
247
248 def __repr__(self):
248 def __repr__(self):
249 return "<filectx %s>" % str(self)
249 return "<filectx %s>" % str(self)
250
250
251 def __hash__(self):
251 def __hash__(self):
252 try:
252 try:
253 return hash((self._path, self._fileid))
253 return hash((self._path, self._fileid))
254 except AttributeError:
254 except AttributeError:
255 return id(self)
255 return id(self)
256
256
257 def __eq__(self, other):
257 def __eq__(self, other):
258 try:
258 try:
259 return (self._path == other._path
259 return (self._path == other._path
260 and self._fileid == other._fileid)
260 and self._fileid == other._fileid)
261 except AttributeError:
261 except AttributeError:
262 return False
262 return False
263
263
264 def __ne__(self, other):
264 def __ne__(self, other):
265 return not (self == other)
265 return not (self == other)
266
266
267 def filectx(self, fileid):
267 def filectx(self, fileid):
268 '''opens an arbitrary revision of the file without
268 '''opens an arbitrary revision of the file without
269 opening a new filelog'''
269 opening a new filelog'''
270 return filectx(self._repo, self._path, fileid=fileid,
270 return filectx(self._repo, self._path, fileid=fileid,
271 filelog=self._filelog)
271 filelog=self._filelog)
272
272
273 def filerev(self): return self._filerev
273 def filerev(self): return self._filerev
274 def filenode(self): return self._filenode
274 def filenode(self): return self._filenode
275 def flags(self): return self._changectx.flags(self._path)
275 def flags(self): return self._changectx.flags(self._path)
276 def filelog(self): return self._filelog
276 def filelog(self): return self._filelog
277
277
278 def rev(self):
278 def rev(self):
279 if '_changectx' in self.__dict__:
279 if '_changectx' in self.__dict__:
280 return self._changectx.rev()
280 return self._changectx.rev()
281 if '_changeid' in self.__dict__:
281 if '_changeid' in self.__dict__:
282 return self._changectx.rev()
282 return self._changectx.rev()
283 return self._filelog.linkrev(self._filerev)
283 return self._filelog.linkrev(self._filerev)
284
284
285 def linkrev(self): return self._filelog.linkrev(self._filerev)
285 def linkrev(self): return self._filelog.linkrev(self._filerev)
286 def node(self): return self._changectx.node()
286 def node(self): return self._changectx.node()
287 def user(self): return self._changectx.user()
287 def user(self): return self._changectx.user()
288 def date(self): return self._changectx.date()
288 def date(self): return self._changectx.date()
289 def files(self): return self._changectx.files()
289 def files(self): return self._changectx.files()
290 def description(self): return self._changectx.description()
290 def description(self): return self._changectx.description()
291 def branch(self): return self._changectx.branch()
291 def branch(self): return self._changectx.branch()
292 def manifest(self): return self._changectx.manifest()
292 def manifest(self): return self._changectx.manifest()
293 def changectx(self): return self._changectx
293 def changectx(self): return self._changectx
294
294
295 def data(self): return self._filelog.read(self._filenode)
295 def data(self): return self._filelog.read(self._filenode)
296 def path(self): return self._path
296 def path(self): return self._path
297 def size(self): return self._filelog.size(self._filerev)
297 def size(self): return self._filelog.size(self._filerev)
298
298
299 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
299 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
300
300
301 def renamed(self):
301 def renamed(self):
302 """check if file was actually renamed in this changeset revision
302 """check if file was actually renamed in this changeset revision
303
303
304 If a rename is logged in the file revision, we report a copy for the
304 If a rename is logged in the file revision, we report a copy for the
305 changeset only if the file revision's linkrev points back to the
305 changeset only if the file revision's linkrev points back to the
306 changeset in question, or if both changeset parents contain different file revisions.
306 changeset in question, or if both changeset parents contain different file revisions.
307 """
307 """
308
308
309 renamed = self._filelog.renamed(self._filenode)
309 renamed = self._filelog.renamed(self._filenode)
310 if not renamed:
310 if not renamed:
311 return renamed
311 return renamed
312
312
313 if self.rev() == self.linkrev():
313 if self.rev() == self.linkrev():
314 return renamed
314 return renamed
315
315
316 name = self.path()
316 name = self.path()
317 fnode = self._filenode
317 fnode = self._filenode
318 for p in self._changectx.parents():
318 for p in self._changectx.parents():
319 try:
319 try:
320 if fnode == p.filenode(name):
320 if fnode == p.filenode(name):
321 return None
321 return None
322 except error.LookupError:
322 except error.LookupError:
323 pass
323 pass
324 return renamed
324 return renamed
325
325
326 def parents(self):
326 def parents(self):
327 p = self._path
327 p = self._path
328 fl = self._filelog
328 fl = self._filelog
329 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
329 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
330
330
331 r = self._filelog.renamed(self._filenode)
331 r = self._filelog.renamed(self._filenode)
332 if r:
332 if r:
333 pl[0] = (r[0], r[1], None)
333 pl[0] = (r[0], r[1], None)
334
334
335 return [filectx(self._repo, p, fileid=n, filelog=l)
335 return [filectx(self._repo, p, fileid=n, filelog=l)
336 for p,n,l in pl if n != nullid]
336 for p,n,l in pl if n != nullid]
337
337
338 def children(self):
338 def children(self):
339 # hard for renames
339 # hard for renames
340 c = self._filelog.children(self._filenode)
340 c = self._filelog.children(self._filenode)
341 return [filectx(self._repo, self._path, fileid=x,
341 return [filectx(self._repo, self._path, fileid=x,
342 filelog=self._filelog) for x in c]
342 filelog=self._filelog) for x in c]
343
343
344 def annotate(self, follow=False, linenumber=None):
344 def annotate(self, follow=False, linenumber=None):
345 '''returns a list of tuples of (ctx, line) for each line
345 '''returns a list of tuples of (ctx, line) for each line
346 in the file, where ctx is the filectx of the node where
346 in the file, where ctx is the filectx of the node where
347 that line was last changed.
347 that line was last changed.
348 If the "linenumber" parameter is not None, this instead returns
348 If the "linenumber" parameter is not None, this instead returns
349 tuples of ((ctx, linenumber), line) for each line, where
349 tuples of ((ctx, linenumber), line) for each line, where
350 linenumber is the line's number at its first appearance
350 linenumber is the line's number at its first appearance
351 in the managed file.
351 in the managed file.
352 To reduce annotation cost,
352 To reduce annotation cost,
353 a fixed value (False) is returned as the linenumber
353 a fixed value (False) is returned as the linenumber
354 if the "linenumber" parameter is False.'''
354 if the "linenumber" parameter is False.'''
355
356 def decorate_compat(text, rev):
357 return ([rev] * len(text.splitlines()), text)
358
359 def without_linenumber(text, rev):
360 return ([(rev, False)] * len(text.splitlines()), text)
361
362 def with_linenumber(text, rev):
363 size = len(text.splitlines())
364 return ([(rev, i) for i in xrange(1, size + 1)], text)
365
366 decorate = (((linenumber is None) and decorate_compat) or
367 (linenumber and with_linenumber) or
368 without_linenumber)
369
370 def pair(parent, child):
371 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
372 child[0][b1:b2] = parent[0][a1:a2]
373 return child
374
375 getlog = util.cachefunc(lambda x: self._repo.file(x))
376 def getctx(path, fileid):
377 log = path == self._path and self._filelog or getlog(path)
378 return filectx(self._repo, path, fileid=fileid, filelog=log)
379 getctx = util.cachefunc(getctx)
380
381 def parents(f):
382 # we want to reuse filectx objects as much as possible
383 p = f._path
384 if f._filerev is None: # working dir
385 pl = [(n.path(), n.filerev()) for n in f.parents()]
386 else:
387 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
388
389 if follow:
390 r = f.renamed()
391 if r:
392 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
393
394 return [getctx(p, n) for p, n in pl if n != nullrev]
395
396 # use linkrev to find the first changeset where self appeared
397 if self.rev() != self.linkrev():
398 base = self.filectx(self.filerev())
399 else:
400 base = self
401
402 # find all ancestors
403 needed = {base: 1}
404 visit = [base]
405 files = [base._path]
406 while visit:
407 f = visit.pop(0)
408 for p in parents(f):
409 if p not in needed:
410 needed[p] = 1
411 visit.append(p)
412 if p._path not in files:
413 files.append(p._path)
414 else:
415 # count how many times we'll use this
416 needed[p] += 1
417
418 # sort by revision (per file) which is a topological order
419 visit = []
420 for f in files:
421 fn = [(n.rev(), n) for n in needed if n._path == f]
422 visit.extend(fn)
423
424 hist = {}
425 for r, f in sorted(visit):
426 curr = decorate(f.data(), f)
427 for p in parents(f):
428 if p != nullid:
429 curr = pair(hist[p], curr)
430 # trim the history of unneeded revs
431 needed[p] -= 1
432 if not needed[p]:
433 del hist[p]
434 hist[f] = curr
435
436 return zip(hist[f][0], hist[f][1].splitlines(1))
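To illustrate the interface above, a minimal sketch, assuming an existing repository; the repository path and file name are hypothetical:

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
fctx = repo['tip'].filectx('setup.py')          # hypothetical file
for (ctx, lineno), line in fctx.annotate(linenumber=True):
    # ctx is the filectx that last changed the line; lineno is the
    # line's position when it first appeared in the file
    print "%d:%s: %s" % (ctx.rev(), lineno, line),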
437
438 def ancestor(self, fc2):
439 """
440 find the common ancestor file context, if any, of self and fc2
441 """
442
443 acache = {}
444
445 # prime the ancestor cache for the working directory
446 for c in (self, fc2):
447 if c._filerev == None:
447 if c._filerev is None:
448 pl = [(n.path(), n.filenode()) for n in c.parents()]
449 acache[(c._path, None)] = pl
450
451 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
452 def parents(vertex):
453 if vertex in acache:
454 return acache[vertex]
455 f, n = vertex
456 if f not in flcache:
457 flcache[f] = self._repo.file(f)
458 fl = flcache[f]
459 pl = [(f, p) for p in fl.parents(n) if p != nullid]
460 re = fl.renamed(n)
461 if re:
462 pl.append(re)
463 acache[vertex] = pl
464 return pl
465
466 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
467 v = ancestor.ancestor(a, b, parents)
468 if v:
469 f, n = v
470 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
471
472 return None
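For orientation, a minimal sketch of finding the merge base of one file across two revisions; the repository path, revisions and file name are hypothetical:

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
fc1 = repo[0].filectx('README')                 # hypothetical file
fc2 = repo['tip'].filectx('README')
base = fc1.ancestor(fc2)
if base:
    # the ancestor may live under another path if the file was
    # renamed on one side; renames are followed via fl.renamed(n)
    print "merge base: %s@%s" % (base.path(), base.rev())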
473
474 class workingctx(changectx):
475 """A workingctx object makes access to data related to
476 the current working directory convenient.
477 parents - a pair of parent nodeids, or None to use the dirstate.
478 date - any valid date string or (unixtime, offset), or None.
479 user - username string, or None.
480 extra - a dictionary of extra values, or None.
481 changes - a list of file lists as returned by localrepo.status()
482 or None to use the repository status.
483 """
484 def __init__(self, repo, parents=None, text="", user=None, date=None,
485 extra=None, changes=None):
486 self._repo = repo
487 self._rev = None
488 self._node = None
489 self._text = text
490 if date:
491 self._date = util.parsedate(date)
492 if user:
493 self._user = user
494 if parents:
495 self._parents = [changectx(self._repo, p) for p in parents]
496 if changes:
497 self._status = list(changes)
498
499 self._extra = {}
500 if extra:
501 self._extra = extra.copy()
502 if 'branch' not in self._extra:
503 branch = self._repo.dirstate.branch()
504 try:
505 branch = branch.decode('UTF-8').encode('UTF-8')
506 except UnicodeDecodeError:
507 raise util.Abort(_('branch name not in UTF-8!'))
508 self._extra['branch'] = branch
509 if self._extra['branch'] == '':
510 self._extra['branch'] = 'default'
511
512 def __str__(self):
513 return str(self._parents[0]) + "+"
514
515 def __nonzero__(self):
516 return True
517
518 def __contains__(self, key):
519 return self._repo.dirstate[key] not in "?r"
520
521 @propertycache
522 def _manifest(self):
523 """generate a manifest corresponding to the working directory"""
524
525 man = self._parents[0].manifest().copy()
526 copied = self._repo.dirstate.copies()
527 cf = lambda x: man.flags(copied.get(x, x))
528 ff = self._repo.dirstate.flagfunc(cf)
529 modified, added, removed, deleted, unknown = self._status[:5]
530 for i, l in (("a", added), ("m", modified), ("u", unknown)):
531 for f in l:
532 man[f] = man.get(copied.get(f, f), nullid) + i
533 try:
534 man.set(f, ff(f))
535 except OSError:
536 pass
537
538 for f in deleted + removed:
539 if f in man:
540 del man[f]
541
542 return man
543
544 @propertycache
545 def _status(self):
546 return self._repo.status(unknown=True)
547
548 @propertycache
549 def _user(self):
550 return self._repo.ui.username()
551
552 @propertycache
553 def _date(self):
554 return util.makedate()
555
556 @propertycache
557 def _parents(self):
558 p = self._repo.dirstate.parents()
559 if p[1] == nullid:
560 p = p[:-1]
561 self._parents = [changectx(self._repo, x) for x in p]
562 return self._parents
563
564 def manifest(self): return self._manifest
565
566 def user(self): return self._user or self._repo.ui.username()
567 def date(self): return self._date
568 def description(self): return self._text
569 def files(self):
570 return sorted(self._status[0] + self._status[1] + self._status[2])
571
572 def modified(self): return self._status[0]
573 def added(self): return self._status[1]
574 def removed(self): return self._status[2]
575 def deleted(self): return self._status[3]
576 def unknown(self): return self._status[4]
577 def clean(self): return self._status[5]
578 def branch(self): return self._extra['branch']
579 def extra(self): return self._extra
580
581 def tags(self):
582 t = []
583 [t.extend(p.tags()) for p in self.parents()]
584 return t
585
586 def children(self):
587 return []
588
589 def flags(self, path):
590 if '_manifest' in self.__dict__:
591 try:
592 return self._manifest.flags(path)
593 except KeyError:
594 return ''
595
596 pnode = self._parents[0].changeset()[0]
597 orig = self._repo.dirstate.copies().get(path, path)
598 node, flag = self._repo.manifest.find(pnode, orig)
599 try:
600 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
601 return ff(path)
602 except OSError:
603 pass
604
605 if not node or path in self.deleted() or path in self.removed():
606 return ''
607 return flag
608
609 def filectx(self, path, filelog=None):
610 """get a file context from the working directory"""
611 return workingfilectx(self._repo, path, workingctx=self,
612 filelog=filelog)
613
614 def ancestor(self, c2):
615 """return the ancestor context of self and c2"""
616 return self._parents[0].ancestor(c2) # punt on two parents for now
617
618 def walk(self, match):
619 return sorted(self._repo.dirstate.walk(match, True, False))
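As a usage sketch for the class above, assuming 'repo' is an already-open localrepository:

wctx = repo[None]        # workingctx, via localrepo.__getitem__
print 'branch:  ', wctx.branch()
print 'modified:', wctx.modified()
print 'added:   ', wctx.added()
print 'removed: ', wctx.removed()
# files() merges the modified, added and removed lists, sorted
print 'files:   ', wctx.files()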
620
621 class workingfilectx(filectx):
622 """A workingfilectx object makes access to data related to a particular
623 file in the working directory convenient."""
624 def __init__(self, repo, path, filelog=None, workingctx=None):
625 """path is the file's path relative to the repository root;
626 filelog and workingctx, when given, prime the cached attributes."""
627 self._repo = repo
628 self._path = path
629 self._changeid = None
630 self._filerev = self._filenode = None
631
632 if filelog:
633 self._filelog = filelog
634 if workingctx:
635 self._changectx = workingctx
636
637 @propertycache
638 def _changectx(self):
639 return workingctx(self._repo)
640
641 @propertycache
642 def _repopath(self):
643 return self._repo.dirstate.copied(self._path) or self._path
644
645 @propertycache
646 def _filelog(self):
647 return self._repo.file(self._repopath)
648
649 def __nonzero__(self):
650 return True
651
652 def __str__(self):
653 return "%s@%s" % (self.path(), self._changectx)
654
655 def filectx(self, fileid):
656 '''opens an arbitrary revision of the file without
657 opening a new filelog'''
658 return filectx(self._repo, self._repopath, fileid=fileid,
659 filelog=self._filelog)
660
661 def rev(self):
662 if '_changectx' in self.__dict__:
663 return self._changectx.rev()
664 return self._filelog.linkrev(self._filerev)
665
666 def data(self): return self._repo.wread(self._path)
667 def renamed(self):
668 rp = self._repopath
669 if rp == self._path:
670 return None
671 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
672
673 def parents(self):
674 '''return parent filectxs, following copies if necessary'''
675 p = self._path
676 rp = self._repopath
677 pcl = self._changectx._parents
678 fl = self._filelog
679 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
680 if len(pcl) > 1:
681 if rp != p:
682 fl = None
683 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
684
685 return [filectx(self._repo, p, fileid=n, filelog=l)
686 for p,n,l in pl if n != nullid]
687
688 def children(self):
689 return []
690
691 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
692 def date(self):
693 t, tz = self._changectx.date()
694 try:
695 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
696 except OSError, err:
697 if err.errno != errno.ENOENT: raise
698 return (t, tz)
699
700 def cmp(self, text): return self._repo.wread(self._path) == text
701
702 class memctx(object):
703 """Use memctx to perform in-memory commits via localrepo.commitctx().
704
705 Revision information is supplied at initialization time, while
706 related file data is made available through a callback
707 mechanism. 'repo' is the current localrepo, 'parents' is a
708 sequence of two parent revisions identifiers (pass None for every
709 missing parent), 'text' is the commit message and 'files' lists
710 names of files touched by the revision (normalized and relative to
711 repository root).
712
713 filectxfn(repo, memctx, path) is a callable receiving the
714 repository, the current memctx object and the normalized path of
715 the requested file, relative to repository root. It is fired by the
716 commit function for every file in 'files', but the call order is
717 undefined. If the file is available in the revision being
718 committed (updated or added), filectxfn returns a memfilectx
719 object. If the file was removed, filectxfn raises an
720 IOError. Moved files are represented by marking the source file
721 removed and the new file added with copy information (see
722 memfilectx).
723
724 user receives the committer name and defaults to the current
725 repository username, date is the commit date in any format
726 supported by util.parsedate() and defaults to the current date,
727 extra is a dictionary of metadata or is left empty.
728 """
729 def __init__(self, repo, parents, text, files, filectxfn, user=None,
730 date=None, extra=None):
731 self._repo = repo
732 self._rev = None
733 self._node = None
734 self._text = text
735 self._date = date and util.parsedate(date) or util.makedate()
736 self._user = user
737 parents = [(p or nullid) for p in parents]
738 p1, p2 = parents
739 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
740 files = sorted(set(files))
741 self._status = [files, [], [], [], []]
742 self._filectxfn = filectxfn
743
744 self._extra = extra and extra.copy() or {}
745 if 'branch' not in self._extra:
746 self._extra['branch'] = 'default'
747 elif self._extra.get('branch') == '':
748 self._extra['branch'] = 'default'
749
750 def __str__(self):
751 return str(self._parents[0]) + "+"
752
753 def __int__(self):
754 return self._rev
755
756 def __nonzero__(self):
757 return True
758
759 def __getitem__(self, key):
760 return self.filectx(key)
761
762 def p1(self): return self._parents[0]
763 def p2(self): return self._parents[1]
764
765 def user(self): return self._user or self._repo.ui.username()
766 def date(self): return self._date
767 def description(self): return self._text
768 def files(self): return self.modified()
769 def modified(self): return self._status[0]
770 def added(self): return self._status[1]
771 def removed(self): return self._status[2]
772 def deleted(self): return self._status[3]
773 def unknown(self): return self._status[4]
774 def clean(self): return self._status[5]
775 def branch(self): return self._extra['branch']
776 def extra(self): return self._extra
777 def flags(self, f): return self[f].flags()
778
779 def parents(self):
780 """return contexts for each parent changeset"""
781 return self._parents
782
783 def filectx(self, path, filelog=None):
784 """get a file context from the working directory"""
785 return self._filectxfn(self._repo, self, path)
786
787 class memfilectx(object):
788 """memfilectx represents an in-memory file to commit.
789
790 See memctx for more details.
791 """
792 def __init__(self, path, data, islink, isexec, copied):
793 """
794 path is the normalized file path relative to repository root.
795 data is the file content as a string.
796 islink is True if the file is a symbolic link.
797 isexec is True if the file is executable.
798 copied is the source file path if the current file was copied in
799 the revision being committed, or None."""
800 self._path = path
801 self._data = data
802 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
803 self._copied = None
804 if copied:
805 self._copied = (copied, nullid)
806
807 def __nonzero__(self): return True
808 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
809 def path(self): return self._path
810 def data(self): return self._data
811 def flags(self): return self._flags
812 def isexec(self): return 'x' in self._flags
813 def islink(self): return 'l' in self._flags
814 def renamed(self): return self._copied
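Putting memctx and memfilectx together, a minimal sketch following the docstrings above; the repository path, file name and user are hypothetical:

from mercurial import ui, hg, context
from mercurial.node import hex

def getfilectx(repo, memctx, path):
    # return the new contents; raising IOError would mark 'path' removed
    return context.memfilectx(path, 'hello\n', False, False, None)

repo = hg.repository(ui.ui(), '/path/to/repo')   # hypothetical path
mctx = context.memctx(repo, (repo['tip'].node(), None),
                      'in-memory commit', ['hello.txt'], getfilectx,
                      user='someone <someone@example.com>')
node = repo.commitctx(mctx)
print hex(node)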
@@ -1,233 +1,233 b''
1 # copies.py - copy detection for Mercurial
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7
8 from i18n import _
9 import util
10 import heapq
11
12 def _nonoverlap(d1, d2, d3):
13 "Return list of elements in d1 not in d2 or d3"
14 return sorted([d for d in d1 if d not in d3 and d not in d2])
15
16 def _dirname(f):
17 s = f.rfind("/")
18 if s == -1:
19 return ""
20 return f[:s]
21
22 def _dirs(files):
23 d = set()
24 for f in files:
25 f = _dirname(f)
26 while f not in d:
27 d.add(f)
28 f = _dirname(f)
29 return d
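A quick worked example of the two helpers above; note that _dirs collects every ancestor directory up to the root, which is represented by the empty string:

>>> _dirname("a/b/c.txt")
'a/b'
>>> sorted(_dirs(["a/b/c.txt", "a/d.txt"]))
['', 'a', 'a/b']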
30
31 def _findoldnames(fctx, limit):
32 "find files that path was copied from, back to linkrev limit"
33 old = {}
34 seen = set()
35 orig = fctx.path()
36 visit = [(fctx, 0)]
37 while visit:
38 fc, depth = visit.pop()
39 s = str(fc)
40 if s in seen:
41 continue
42 seen.add(s)
43 if fc.path() != orig and fc.path() not in old:
44 old[fc.path()] = (depth, fc.path()) # remember depth
45 if fc.rev() < limit and fc.rev() is not None:
46 continue
47 visit += [(p, depth - 1) for p in fc.parents()]
48
49 # return old names sorted by depth
50 return [o[1] for o in sorted(old.values())]
51
52 def _findlimit(repo, a, b):
53 "find the earliest revision that's an ancestor of a or b but not both"
54 # basic idea:
55 # - mark a and b with different sides
56 # - if a parent's children are all on the same side, the parent is
57 # on that side, otherwise it is on no side
58 # - walk the graph in topological order with the help of a heap;
59 # - add unseen parents to side map
60 # - clear side of any parent that has children on different sides
61 # - track number of interesting revs that might still be on a side
62 # - track the lowest interesting rev seen
63 # - quit when interesting revs is zero
64
65 cl = repo.changelog
66 working = len(cl) # pseudo rev for the working directory
67 if a is None:
68 a = working
69 if b is None:
70 b = working
71
72 side = {a: -1, b: 1}
73 visit = [-a, -b]
74 heapq.heapify(visit)
75 interesting = len(visit)
76 limit = working
77
78 while interesting:
79 r = -heapq.heappop(visit)
80 if r == working:
81 parents = [cl.rev(p) for p in repo.dirstate.parents()]
82 else:
83 parents = cl.parentrevs(r)
84 for p in parents:
85 if p not in side:
86 # first time we see p; add it to visit
87 side[p] = side[r]
88 if side[p]:
89 interesting += 1
90 heapq.heappush(visit, -p)
91 elif side[p] and side[p] != side[r]:
92 # p was interesting but now we know better
93 side[p] = 0
94 interesting -= 1
95 if side[r]:
96 limit = r # lowest rev visited
97 interesting -= 1
98 return limit
99
100 def copies(repo, c1, c2, ca, checkdirs=False):
101 """
102 Find moves and copies between context c1 and c2
103 """
104 # avoid silly behavior for update from empty dir
105 if not c1 or not c2 or c1 == c2:
106 return {}, {}
107
108 # avoid silly behavior for parent -> working dir
109 if c2.node() == None and c1.node() == repo.dirstate.parents()[0]:
109 if c2.node() is None and c1.node() == repo.dirstate.parents()[0]:
110 return repo.dirstate.copies(), {}
111
112 limit = _findlimit(repo, c1.rev(), c2.rev())
113 m1 = c1.manifest()
114 m2 = c2.manifest()
115 ma = ca.manifest()
116
117 def makectx(f, n):
118 if len(n) != 20: # in a working context?
119 if c1.rev() is None:
120 return c1.filectx(f)
121 return c2.filectx(f)
122 return repo.filectx(f, fileid=n)
123 ctx = util.cachefunc(makectx)
124
125 copy = {}
126 fullcopy = {}
127 diverge = {}
128
129 def checkcopies(f, m1, m2):
130 '''check possible copies of f from m1 to m2'''
131 c1 = ctx(f, m1[f])
132 for of in _findoldnames(c1, limit):
133 fullcopy[f] = of # remember for dir rename detection
134 if of in m2: # original file in other manifest?
135 # if the original file is unchanged on the other branch,
136 # no merge needed
137 if m2[of] != ma.get(of):
138 c2 = ctx(of, m2[of])
139 ca = c1.ancestor(c2)
140 # related and named changed on only one side?
141 if ca and (ca.path() == f or ca.path() == c2.path()):
142 if c1 != ca or c2 != ca: # merge needed?
143 copy[f] = of
144 elif of in ma:
145 diverge.setdefault(of, []).append(f)
146
147 repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
148
149 u1 = _nonoverlap(m1, m2, ma)
150 u2 = _nonoverlap(m2, m1, ma)
151
152 if u1:
153 repo.ui.debug(_(" unmatched files in local:\n %s\n")
154 % "\n ".join(u1))
155 if u2:
156 repo.ui.debug(_(" unmatched files in other:\n %s\n")
157 % "\n ".join(u2))
158
159 for f in u1:
160 checkcopies(f, m1, m2)
161 for f in u2:
162 checkcopies(f, m2, m1)
163
164 diverge2 = set()
165 for of, fl in diverge.items():
166 if len(fl) == 1:
167 del diverge[of] # not actually divergent
168 else:
169 diverge2.update(fl) # reverse map for below
170
171 if fullcopy:
172 repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
173 for f in fullcopy:
174 note = ""
175 if f in copy: note += "*"
176 if f in diverge2: note += "!"
177 repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
178 del diverge2
179
180 if not fullcopy or not checkdirs:
181 return copy, diverge
182
183 repo.ui.debug(_(" checking for directory renames\n"))
184
185 # generate a directory move map
186 d1, d2 = _dirs(m1), _dirs(m2)
187 invalid = set()
188 dirmove = {}
189
190 # examine each file copy for a potential directory move, which is
191 # when all the files in a directory are moved to a new directory
192 for dst, src in fullcopy.iteritems():
193 dsrc, ddst = _dirname(src), _dirname(dst)
194 if dsrc in invalid:
195 # already seen to be uninteresting
196 continue
197 elif dsrc in d1 and ddst in d1:
198 # directory wasn't entirely moved locally
199 invalid.add(dsrc)
200 elif dsrc in d2 and ddst in d2:
201 # directory wasn't entirely moved remotely
202 invalid.add(dsrc)
203 elif dsrc in dirmove and dirmove[dsrc] != ddst:
204 # files from the same directory moved to two different places
205 invalid.add(dsrc)
206 else:
207 # looks good so far
208 dirmove[dsrc + "/"] = ddst + "/"
209
210 for i in invalid:
211 if i in dirmove:
212 del dirmove[i]
213 del d1, d2, invalid
214
215 if not dirmove:
216 return copy, diverge
217
218 for d in dirmove:
219 repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
220
221 # check unaccounted nonoverlapping files against directory moves
222 for f in u1 + u2:
223 if f not in fullcopy:
224 for d in dirmove:
225 if f.startswith(d):
226 # new file added in a directory that was moved, move it
227 df = dirmove[d] + f[len(d):]
228 if df not in copy:
229 copy[f] = df
230 repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
231 break
232
233 return copy, diverge
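A sketch of driving the function above from the API; the repository path is hypothetical, and revision 0 and 'tip' are arbitrary example revisions:

from mercurial import ui, hg, copies

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
c1, c2 = repo[0], repo['tip']                   # example revisions
ca = c1.ancestor(c2)
copy, diverge = copies.copies(repo, c1, c2, ca, checkdirs=True)
for dst, src in copy.iteritems():
    print "%s was copied or renamed from %s" % (dst, src)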
@@ -1,2073 +1,2073 b''
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
14 import match as match_
15 import merge as merge_
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
19
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset'))
22 supported = set('revlogv1 store fncache'.split())
23
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
31
32 if not os.path.isdir(self.path):
33 if create:
34 if not os.path.exists(path):
35 os.mkdir(path)
36 os.mkdir(self.path)
37 requirements = ["revlogv1"]
38 if baseui.configbool('format', 'usestore', True):
39 os.mkdir(os.path.join(self.path, "store"))
40 requirements.append("store")
41 if baseui.configbool('format', 'usefncache', True):
42 requirements.append("fncache")
43 # create an invalid changelog
44 self.opener("00changelog.i", "a").write(
45 '\0\0\0\2' # represents revlogv2
46 ' dummy changelog to prevent using the old repo layout'
47 )
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
50 reqfile.write("%s\n" % r)
51 reqfile.close()
52 else:
53 raise error.RepoError(_("repository %s not found") % path)
54 elif create:
55 raise error.RepoError(_("repository %s already exists") % path)
56 else:
57 # find requirements
58 requirements = set()
59 try:
60 requirements = set(self.opener("requires").read().splitlines())
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
63 raise
64 for r in requirements - self.supported:
65 raise error.RepoError(_("requirement '%s' not supported") % r)
66
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
72
73 self.baseui = baseui
74 self.ui = baseui.copy()
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
78 except IOError:
79 pass
80
81 self.tagscache = None
82 self._tagstypecache = None
83 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
86 self.nodetagscache = None
87 self.filterpats = {}
88 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
90
91 @propertycache
92 def changelog(self):
93 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
99 return c
100
101 @propertycache
102 def manifest(self):
103 return manifest.manifest(self.sopener)
104
105 @propertycache
106 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
108
109 def __getitem__(self, changeid):
110 if changeid == None:
110 if changeid is None:
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
113
114 def __nonzero__(self):
115 return True
116
117 def __len__(self):
118 return len(self.changelog)
119
120 def __iter__(self):
121 for i in xrange(len(self)):
122 yield i
123
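Given the accessors above, a localrepository behaves like a sequence of revisions. A minimal sketch, assuming 'repo' is an already-open localrepository:

wctx = repo[None]          # workingctx for the working directory
tip = repo[len(repo) - 1]  # changectx by revision number
for rev in repo:           # __iter__ yields revision numbers 0..tip
    pass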
124 def url(self):
125 return 'file:' + self.root
126
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
129
130 tag_disallowed = ':\r\n'
131
132 def _tag(self, names, node, message, local, user, date, extra={}):
133 if isinstance(names, str):
134 allchars = names
135 names = (names,)
136 else:
137 allchars = ''.join(names)
138 for c in self.tag_disallowed:
139 if c in allchars:
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141
142 for name in names:
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 local=local)
145
146 def writetags(fp, names, munge, prevtags):
147 fp.seek(0, 2)
148 if prevtags and prevtags[-1] != '\n':
149 fp.write('\n')
150 for name in names:
151 m = munge and munge(name) or name
152 if self._tagstypecache and name in self._tagstypecache:
153 old = self.tagscache.get(name, nullid)
154 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(node), m))
156 fp.close()
157
158 prevtags = ''
159 if local:
160 try:
161 fp = self.opener('localtags', 'r+')
162 except IOError:
163 fp = self.opener('localtags', 'a')
164 else:
165 prevtags = fp.read()
166
167 # local tags are stored in the current charset
168 writetags(fp, names, None, prevtags)
169 for name in names:
170 self.hook('tag', node=hex(node), tag=name, local=local)
171 return
172
173 try:
174 fp = self.wfile('.hgtags', 'rb+')
175 except IOError:
176 fp = self.wfile('.hgtags', 'ab')
177 else:
178 prevtags = fp.read()
179
180 # committed tags are stored in UTF-8
181 writetags(fp, names, encoding.fromlocal, prevtags)
182
183 if '.hgtags' not in self.dirstate:
184 self.add(['.hgtags'])
185
186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
187
188 for name in names:
189 self.hook('tag', node=hex(node), tag=name, local=local)
190
191 return tagnode
192
193 def tag(self, names, node, message, local, user, date):
194 '''tag a revision with one or more symbolic names.
195
196 names is a list of strings or, when adding a single tag, names may be a
197 string.
198
199 if local is True, the tags are stored in a per-repository file.
200 otherwise, they are stored in the .hgtags file, and a new
201 changeset is committed with the change.
202
203 keyword arguments:
204
205 local: whether to store tags in non-version-controlled file
206 (default False)
207
208 message: commit message to use if committing
209
210 user: name of user to use if committing
211
212 date: date tuple to use if committing'''
213
214 for x in self.status()[:5]:
215 if '.hgtags' in x:
216 raise util.Abort(_('working copy of .hgtags is changed '
217 '(please commit .hgtags manually)'))
218
219 self.tags() # instantiate the cache
220 self._tag(names, node, message, local, user, date)
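A usage sketch for tag() above; the repository path, tag name and user are hypothetical, and passing None for date is assumed to fall back to the commit default:

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
node = repo['tip'].node()
# local=False records the tag in .hgtags and commits the change;
# local=True writes .hg/localtags instead (no commit)
repo.tag('v1.0', node, 'Added tag v1.0', False,
         'someone <someone@example.com>', None)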
221
222 def tags(self):
223 '''return a mapping of tag to node'''
224 if self.tagscache:
225 return self.tagscache
226
227 globaltags = {}
228 tagtypes = {}
229
230 def readtags(lines, fn, tagtype):
231 filetags = {}
232 count = 0
233
234 def warn(msg):
235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
236
237 for l in lines:
238 count += 1
239 if not l:
240 continue
241 s = l.split(" ", 1)
242 if len(s) != 2:
243 warn(_("cannot parse entry"))
244 continue
245 node, key = s
246 key = encoding.tolocal(key.strip()) # stored in UTF-8
247 try:
248 bin_n = bin(node)
249 except TypeError:
250 warn(_("node '%s' is not well formed") % node)
251 continue
252 if bin_n not in self.changelog.nodemap:
253 warn(_("tag '%s' refers to unknown node") % key)
254 continue
255
256 h = []
257 if key in filetags:
258 n, h = filetags[key]
259 h.append(n)
260 filetags[key] = (bin_n, h)
261
262 for k, nh in filetags.iteritems():
263 if k not in globaltags:
264 globaltags[k] = nh
265 tagtypes[k] = tagtype
266 continue
267
268 # we prefer the global tag if:
269 # it supersedes us OR
270 # mutual supersedes and it has a higher rank
271 # otherwise we win because we're tip-most
272 an, ah = nh
273 bn, bh = globaltags[k]
274 if (bn != an and an in bh and
275 (bn not in ah or len(bh) > len(ah))):
276 an = bn
277 ah.extend([n for n in bh if n not in ah])
278 globaltags[k] = an, ah
279 tagtypes[k] = tagtype
280
281 # read the tags file from each head, ending with the tip
282 f = None
283 for rev, node, fnode in self._hgtagsnodes():
284 f = (f and f.filectx(fnode) or
285 self.filectx('.hgtags', fileid=fnode))
286 readtags(f.data().splitlines(), f, "global")
287
288 try:
289 data = encoding.fromlocal(self.opener("localtags").read())
290 # localtags are stored in the local character set
291 # while the internal tag table is stored in UTF-8
292 readtags(data.splitlines(), "localtags", "local")
293 except IOError:
294 pass
295
296 self.tagscache = {}
297 self._tagstypecache = {}
298 for k, nh in globaltags.iteritems():
299 n = nh[0]
300 if n != nullid:
301 self.tagscache[k] = n
302 self._tagstypecache[k] = tagtypes[k]
303 self.tagscache['tip'] = self.changelog.tip()
304 return self.tagscache
305
305
306 def tagtype(self, tagname):
306 def tagtype(self, tagname):
307 '''
307 '''
308 return the type of the given tag. result can be:
308 return the type of the given tag. result can be:
309
309
310 'local' : a local tag
310 'local' : a local tag
311 'global' : a global tag
311 'global' : a global tag
312 None : tag does not exist
312 None : tag does not exist
313 '''
313 '''
314
314
315 self.tags()
315 self.tags()
316
316
317 return self._tagstypecache.get(tagname)
317 return self._tagstypecache.get(tagname)
318
318
319 def _hgtagsnodes(self):
319 def _hgtagsnodes(self):
320 last = {}
320 last = {}
321 ret = []
321 ret = []
322 for node in reversed(self.heads()):
322 for node in reversed(self.heads()):
323 c = self[node]
323 c = self[node]
324 rev = c.rev()
324 rev = c.rev()
325 try:
325 try:
326 fnode = c.filenode('.hgtags')
326 fnode = c.filenode('.hgtags')
327 except error.LookupError:
327 except error.LookupError:
328 continue
328 continue
329 ret.append((rev, node, fnode))
329 ret.append((rev, node, fnode))
330 if fnode in last:
330 if fnode in last:
331 ret[last[fnode]] = None
331 ret[last[fnode]] = None
332 last[fnode] = len(ret) - 1
332 last[fnode] = len(ret) - 1
333 return [item for item in ret if item]
333 return [item for item in ret if item]
334
334
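_hgtagsnodes keeps only the entry contributed by the last head that produced a given .hgtags filenode, without re-sorting: earlier slots are blanked out and filtered at the end. A generic, self-contained version of that trick (the helper name is invented):

    def last_occurrence(items, key):
        # keep only the last item per key, preserving overall order
        last = {}
        out = []
        for item in items:
            k = key(item)
            if k in last:
                out[last[k]] = None      # blank the earlier slot
            last[k] = len(out)
            out.append(item)
        return [item for item in out if item is not None]

    assert last_occurrence([('a', 1), ('b', 2), ('a', 3)],
                           key=lambda t: t[0]) == [('b', 2), ('a', 3)]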
335 def tagslist(self):
335 def tagslist(self):
336 '''return a list of tags ordered by revision'''
336 '''return a list of tags ordered by revision'''
337 l = []
337 l = []
338 for t, n in self.tags().iteritems():
338 for t, n in self.tags().iteritems():
339 try:
339 try:
340 r = self.changelog.rev(n)
340 r = self.changelog.rev(n)
341 except:
341 except:
342 r = -2 # sort to the beginning of the list if unknown
342 r = -2 # sort to the beginning of the list if unknown
343 l.append((r, t, n))
343 l.append((r, t, n))
344 return [(t, n) for r, t, n in sorted(l)]
344 return [(t, n) for r, t, n in sorted(l)]
345
345
346 def nodetags(self, node):
346 def nodetags(self, node):
347 '''return the tags associated with a node'''
347 '''return the tags associated with a node'''
348 if not self.nodetagscache:
348 if not self.nodetagscache:
349 self.nodetagscache = {}
349 self.nodetagscache = {}
350 for t, n in self.tags().iteritems():
350 for t, n in self.tags().iteritems():
351 self.nodetagscache.setdefault(n, []).append(t)
351 self.nodetagscache.setdefault(n, []).append(t)
352 return self.nodetagscache.get(node, [])
352 return self.nodetagscache.get(node, [])
353
353
354 def _branchtags(self, partial, lrev):
354 def _branchtags(self, partial, lrev):
355 # TODO: rename this function?
355 # TODO: rename this function?
356 tiprev = len(self) - 1
356 tiprev = len(self) - 1
357 if lrev != tiprev:
357 if lrev != tiprev:
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
360
360
361 return partial
361 return partial
362
362
363 def _branchheads(self):
363 def _branchheads(self):
364 tip = self.changelog.tip()
364 tip = self.changelog.tip()
365 if self.branchcache is not None and self._branchcachetip == tip:
365 if self.branchcache is not None and self._branchcachetip == tip:
366 return self.branchcache
366 return self.branchcache
367
367
368 oldtip = self._branchcachetip
368 oldtip = self._branchcachetip
369 self._branchcachetip = tip
369 self._branchcachetip = tip
370 if self.branchcache is None:
370 if self.branchcache is None:
371 self.branchcache = {} # avoid recursion in changectx
371 self.branchcache = {} # avoid recursion in changectx
372 else:
372 else:
373 self.branchcache.clear() # keep using the same dict
373 self.branchcache.clear() # keep using the same dict
374 if oldtip is None or oldtip not in self.changelog.nodemap:
374 if oldtip is None or oldtip not in self.changelog.nodemap:
375 partial, last, lrev = self._readbranchcache()
375 partial, last, lrev = self._readbranchcache()
376 else:
376 else:
377 lrev = self.changelog.rev(oldtip)
377 lrev = self.changelog.rev(oldtip)
378 partial = self._ubranchcache
378 partial = self._ubranchcache
379
379
380 self._branchtags(partial, lrev)
380 self._branchtags(partial, lrev)
381 # this private cache holds all heads (not just tips)
381 # this private cache holds all heads (not just tips)
382 self._ubranchcache = partial
382 self._ubranchcache = partial
383
383
384 # the branch cache is stored on disk as UTF-8, but in the local
384 # the branch cache is stored on disk as UTF-8, but in the local
385 # charset internally
385 # charset internally
386 for k, v in partial.iteritems():
386 for k, v in partial.iteritems():
387 self.branchcache[encoding.tolocal(k)] = v
387 self.branchcache[encoding.tolocal(k)] = v
388 return self.branchcache
388 return self.branchcache
389
389
390
390
391 def branchtags(self):
391 def branchtags(self):
392 '''return a dict where branch names map to the tipmost head of
392 '''return a dict where branch names map to the tipmost head of
393 the branch; open heads come before closed'''
393 the branch; open heads come before closed'''
394 bt = {}
394 bt = {}
395 for bn, heads in self._branchheads().iteritems():
395 for bn, heads in self._branchheads().iteritems():
396 head = None
396 head = None
397 for i in range(len(heads)-1, -1, -1):
397 for i in range(len(heads)-1, -1, -1):
398 h = heads[i]
398 h = heads[i]
399 if 'close' not in self.changelog.read(h)[5]:
399 if 'close' not in self.changelog.read(h)[5]:
400 head = h
400 head = h
401 break
401 break
402 # no open heads were found
402 # no open heads were found
403 if head is None:
403 if head is None:
404 head = heads[-1]
404 head = heads[-1]
405 bt[bn] = head
405 bt[bn] = head
406 return bt
406 return bt
407
407
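The head selection in branchtags() reduces to: scan a branch's heads from highest to lowest, take the first open one, and fall back to the tip-most head when every head is closed. A hedged sketch, with an invented is_closed predicate standing in for reading the 'close' extra from the changelog:

    def tipmost_open(heads, is_closed):
        for h in reversed(heads):        # heads are ordered lowest to highest
            if not is_closed(h):
                return h
        return heads[-1]                 # no open heads were found

    # head 15 is closed, so the next-highest head wins:
    assert tipmost_open([10, 12, 15], is_closed={15}.__contains__) == 12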
408
408
409 def _readbranchcache(self):
409 def _readbranchcache(self):
410 partial = {}
410 partial = {}
411 try:
411 try:
412 f = self.opener("branchheads.cache")
412 f = self.opener("branchheads.cache")
413 lines = f.read().split('\n')
413 lines = f.read().split('\n')
414 f.close()
414 f.close()
415 except (IOError, OSError):
415 except (IOError, OSError):
416 return {}, nullid, nullrev
416 return {}, nullid, nullrev
417
417
418 try:
418 try:
419 last, lrev = lines.pop(0).split(" ", 1)
419 last, lrev = lines.pop(0).split(" ", 1)
420 last, lrev = bin(last), int(lrev)
420 last, lrev = bin(last), int(lrev)
421 if lrev >= len(self) or self[lrev].node() != last:
421 if lrev >= len(self) or self[lrev].node() != last:
422 # invalidate the cache
422 # invalidate the cache
423 raise ValueError('invalidating branch cache (tip differs)')
423 raise ValueError('invalidating branch cache (tip differs)')
424 for l in lines:
424 for l in lines:
425 if not l: continue
425 if not l: continue
426 node, label = l.split(" ", 1)
426 node, label = l.split(" ", 1)
427 partial.setdefault(label.strip(), []).append(bin(node))
427 partial.setdefault(label.strip(), []).append(bin(node))
428 except KeyboardInterrupt:
428 except KeyboardInterrupt:
429 raise
429 raise
430 except Exception, inst:
430 except Exception, inst:
431 if self.ui.debugflag:
431 if self.ui.debugflag:
432 self.ui.warn(str(inst), '\n')
432 self.ui.warn(str(inst), '\n')
433 partial, last, lrev = {}, nullid, nullrev
433 partial, last, lrev = {}, nullid, nullrev
434 return partial, last, lrev
434 return partial, last, lrev
435
435
436 def _writebranchcache(self, branches, tip, tiprev):
436 def _writebranchcache(self, branches, tip, tiprev):
437 try:
437 try:
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
439 f.write("%s %s\n" % (hex(tip), tiprev))
439 f.write("%s %s\n" % (hex(tip), tiprev))
440 for label, nodes in branches.iteritems():
440 for label, nodes in branches.iteritems():
441 for node in nodes:
441 for node in nodes:
442 f.write("%s %s\n" % (hex(node), label))
442 f.write("%s %s\n" % (hex(node), label))
443 f.rename()
443 f.rename()
444 except (IOError, OSError):
444 except (IOError, OSError):
445 pass
445 pass
446
446
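The cache file the two methods above exchange has a simple line-oriented layout: the first line is '<tip-hex> <tip-rev>' and every following line is '<node-hex> <branch>'. A minimal parser over that layout (the hex strings are made up, and no validation against a changelog is attempted here):

    def parse_branch_cache(text):
        lines = text.split('\n')
        tiphex, tiprev = lines.pop(0).split(' ', 1)
        heads = {}
        for line in lines:
            if not line:
                continue
            node, label = line.split(' ', 1)
            heads.setdefault(label.strip(), []).append(node)
        return tiphex, int(tiprev), heads

    sample = "c0ffee 41\ndeadbeef default\nfeedface stable\n"
    assert parse_branch_cache(sample) == \
        ('c0ffee', 41, {'default': ['deadbeef'], 'stable': ['feedface']})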
447 def _updatebranchcache(self, partial, start, end):
447 def _updatebranchcache(self, partial, start, end):
448 for r in xrange(start, end):
448 for r in xrange(start, end):
449 c = self[r]
449 c = self[r]
450 b = c.branch()
450 b = c.branch()
451 bheads = partial.setdefault(b, [])
451 bheads = partial.setdefault(b, [])
452 bheads.append(c.node())
452 bheads.append(c.node())
453 for p in c.parents():
453 for p in c.parents():
454 pn = p.node()
454 pn = p.node()
455 if pn in bheads:
455 if pn in bheads:
456 bheads.remove(pn)
456 bheads.remove(pn)
457
457
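_updatebranchcache maintains the head lists incrementally: each new revision becomes a head of its branch, and any of its parents that were heads stop being heads. The same replay on plain tuples (the (node, branch, parents) shape is an assumption of this sketch):

    def update_branch_heads(partial, revisions):
        for node, branch, parents in revisions:
            bheads = partial.setdefault(branch, [])
            bheads.append(node)
            for p in parents:
                if p in bheads:
                    bheads.remove(p)     # a parent is no longer a head
        return partial

    # r2 replaces its parent r1 as the sole head of 'default':
    assert update_branch_heads({}, [('r1', 'default', []),
                                    ('r2', 'default', ['r1'])]) == \
        {'default': ['r2']}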
458 def lookup(self, key):
458 def lookup(self, key):
459 if isinstance(key, int):
459 if isinstance(key, int):
460 return self.changelog.node(key)
460 return self.changelog.node(key)
461 elif key == '.':
461 elif key == '.':
462 return self.dirstate.parents()[0]
462 return self.dirstate.parents()[0]
463 elif key == 'null':
463 elif key == 'null':
464 return nullid
464 return nullid
465 elif key == 'tip':
465 elif key == 'tip':
466 return self.changelog.tip()
466 return self.changelog.tip()
467 n = self.changelog._match(key)
467 n = self.changelog._match(key)
468 if n:
468 if n:
469 return n
469 return n
470 if key in self.tags():
470 if key in self.tags():
471 return self.tags()[key]
471 return self.tags()[key]
472 if key in self.branchtags():
472 if key in self.branchtags():
473 return self.branchtags()[key]
473 return self.branchtags()[key]
474 n = self.changelog._partialmatch(key)
474 n = self.changelog._partialmatch(key)
475 if n:
475 if n:
476 return n
476 return n
477 try:
477 try:
478 if len(key) == 20:
478 if len(key) == 20:
479 key = hex(key)
479 key = hex(key)
480 except:
480 except:
481 pass
481 pass
482 raise error.RepoError(_("unknown revision '%s'") % key)
482 raise error.RepoError(_("unknown revision '%s'") % key)
483
483
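After the built-in names (integer revs, '.', 'null', 'tip') and an exact node match, lookup() falls through a fixed precedence: tag, then branch tip, then unambiguous node prefix. A stripped-down resolver showing just that ordering (the function and parameter names are invented for this sketch):

    def resolve(key, tags, branchtips, partial_match):
        if key in tags:
            return tags[key]             # tags shadow branch names
        if key in branchtips:
            return branchtips[key]
        n = partial_match(key)
        if n:
            return n
        raise KeyError("unknown revision %r" % key)

    # a tag named like a branch shadows the branch tip:
    assert resolve('stable', {'stable': 'n1'}, {'stable': 'n2'},
                   lambda k: None) == 'n1'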
484 def local(self):
484 def local(self):
485 return True
485 return True
486
486
487 def join(self, f):
487 def join(self, f):
488 return os.path.join(self.path, f)
488 return os.path.join(self.path, f)
489
489
490 def wjoin(self, f):
490 def wjoin(self, f):
491 return os.path.join(self.root, f)
491 return os.path.join(self.root, f)
492
492
493 def rjoin(self, f):
493 def rjoin(self, f):
494 return os.path.join(self.root, util.pconvert(f))
494 return os.path.join(self.root, util.pconvert(f))
495
495
496 def file(self, f):
496 def file(self, f):
497 if f[0] == '/':
497 if f[0] == '/':
498 f = f[1:]
498 f = f[1:]
499 return filelog.filelog(self.sopener, f)
499 return filelog.filelog(self.sopener, f)
500
500
501 def changectx(self, changeid):
501 def changectx(self, changeid):
502 return self[changeid]
502 return self[changeid]
503
503
504 def parents(self, changeid=None):
504 def parents(self, changeid=None):
505 '''get list of changectxs for parents of changeid'''
505 '''get list of changectxs for parents of changeid'''
506 return self[changeid].parents()
506 return self[changeid].parents()
507
507
508 def filectx(self, path, changeid=None, fileid=None):
508 def filectx(self, path, changeid=None, fileid=None):
509 """changeid can be a changeset revision, node, or tag.
509 """changeid can be a changeset revision, node, or tag.
510 fileid can be a file revision or node."""
510 fileid can be a file revision or node."""
511 return context.filectx(self, path, changeid, fileid)
511 return context.filectx(self, path, changeid, fileid)
512
512
513 def getcwd(self):
513 def getcwd(self):
514 return self.dirstate.getcwd()
514 return self.dirstate.getcwd()
515
515
516 def pathto(self, f, cwd=None):
516 def pathto(self, f, cwd=None):
517 return self.dirstate.pathto(f, cwd)
517 return self.dirstate.pathto(f, cwd)
518
518
519 def wfile(self, f, mode='r'):
519 def wfile(self, f, mode='r'):
520 return self.wopener(f, mode)
520 return self.wopener(f, mode)
521
521
522 def _link(self, f):
522 def _link(self, f):
523 return os.path.islink(self.wjoin(f))
523 return os.path.islink(self.wjoin(f))
524
524
525 def _filter(self, filter, filename, data):
525 def _filter(self, filter, filename, data):
526 if filter not in self.filterpats:
526 if filter not in self.filterpats:
527 l = []
527 l = []
528 for pat, cmd in self.ui.configitems(filter):
528 for pat, cmd in self.ui.configitems(filter):
529 if cmd == '!':
529 if cmd == '!':
530 continue
530 continue
531 mf = util.matcher(self.root, "", [pat], [], [])[1]
531 mf = util.matcher(self.root, "", [pat], [], [])[1]
532 fn = None
532 fn = None
533 params = cmd
533 params = cmd
534 for name, filterfn in self._datafilters.iteritems():
534 for name, filterfn in self._datafilters.iteritems():
535 if cmd.startswith(name):
535 if cmd.startswith(name):
536 fn = filterfn
536 fn = filterfn
537 params = cmd[len(name):].lstrip()
537 params = cmd[len(name):].lstrip()
538 break
538 break
539 if not fn:
539 if not fn:
540 fn = lambda s, c, **kwargs: util.filter(s, c)
540 fn = lambda s, c, **kwargs: util.filter(s, c)
541 # Wrap old filters not supporting keyword arguments
541 # Wrap old filters not supporting keyword arguments
542 if not inspect.getargspec(fn)[2]:
542 if not inspect.getargspec(fn)[2]:
543 oldfn = fn
543 oldfn = fn
544 fn = lambda s, c, **kwargs: oldfn(s, c)
544 fn = lambda s, c, **kwargs: oldfn(s, c)
545 l.append((mf, fn, params))
545 l.append((mf, fn, params))
546 self.filterpats[filter] = l
546 self.filterpats[filter] = l
547
547
548 for mf, fn, cmd in self.filterpats[filter]:
548 for mf, fn, cmd in self.filterpats[filter]:
549 if mf(filename):
549 if mf(filename):
550 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
550 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
551 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
551 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
552 break
552 break
553
553
554 return data
554 return data
555
555
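The compatibility shim in _filter wraps configured filters that predate keyword arguments, so the caller can always pass ui/repo/filename extras. The same idea in isolation, using inspect.getfullargspec as the modern spelling of the getargspec call above (the legacy filter is made up):

    import inspect

    def wrap_filter(fn):
        if inspect.getfullargspec(fn).varkw is None:
            oldfn = fn                   # old-style filter: swallow kwargs
            fn = lambda s, c, **kwargs: oldfn(s, c)
        return fn

    def legacy(data, cmd):               # a made-up two-argument filter
        return data.upper()

    f = wrap_filter(legacy)
    assert f("abc", "cmd", ui=None, repo=None) == "ABC"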
556 def adddatafilter(self, name, filter):
556 def adddatafilter(self, name, filter):
557 self._datafilters[name] = filter
557 self._datafilters[name] = filter
558
558
559 def wread(self, filename):
559 def wread(self, filename):
560 if self._link(filename):
560 if self._link(filename):
561 data = os.readlink(self.wjoin(filename))
561 data = os.readlink(self.wjoin(filename))
562 else:
562 else:
563 data = self.wopener(filename, 'r').read()
563 data = self.wopener(filename, 'r').read()
564 return self._filter("encode", filename, data)
564 return self._filter("encode", filename, data)
565
565
566 def wwrite(self, filename, data, flags):
566 def wwrite(self, filename, data, flags):
567 data = self._filter("decode", filename, data)
567 data = self._filter("decode", filename, data)
568 try:
568 try:
569 os.unlink(self.wjoin(filename))
569 os.unlink(self.wjoin(filename))
570 except OSError:
570 except OSError:
571 pass
571 pass
572 if 'l' in flags:
572 if 'l' in flags:
573 self.wopener.symlink(data, filename)
573 self.wopener.symlink(data, filename)
574 else:
574 else:
575 self.wopener(filename, 'w').write(data)
575 self.wopener(filename, 'w').write(data)
576 if 'x' in flags:
576 if 'x' in flags:
577 util.set_flags(self.wjoin(filename), False, True)
577 util.set_flags(self.wjoin(filename), False, True)
578
578
579 def wwritedata(self, filename, data):
579 def wwritedata(self, filename, data):
580 return self._filter("decode", filename, data)
580 return self._filter("decode", filename, data)
581
581
582 def transaction(self):
582 def transaction(self):
583 tr = self._transref and self._transref() or None
583 tr = self._transref and self._transref() or None
584 if tr and tr.running():
584 if tr and tr.running():
585 return tr.nest()
585 return tr.nest()
586
586
587 # abort here if the journal already exists
587 # abort here if the journal already exists
588 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
589 raise error.RepoError(_("journal already exists - run hg recover"))
589 raise error.RepoError(_("journal already exists - run hg recover"))
590
590
591 # save dirstate for rollback
591 # save dirstate for rollback
592 try:
592 try:
593 ds = self.opener("dirstate").read()
593 ds = self.opener("dirstate").read()
594 except IOError:
594 except IOError:
595 ds = ""
595 ds = ""
596 self.opener("journal.dirstate", "w").write(ds)
596 self.opener("journal.dirstate", "w").write(ds)
597 self.opener("journal.branch", "w").write(self.dirstate.branch())
597 self.opener("journal.branch", "w").write(self.dirstate.branch())
598
598
599 renames = [(self.sjoin("journal"), self.sjoin("undo")),
599 renames = [(self.sjoin("journal"), self.sjoin("undo")),
600 (self.join("journal.dirstate"), self.join("undo.dirstate")),
600 (self.join("journal.dirstate"), self.join("undo.dirstate")),
601 (self.join("journal.branch"), self.join("undo.branch"))]
601 (self.join("journal.branch"), self.join("undo.branch"))]
602 tr = transaction.transaction(self.ui.warn, self.sopener,
602 tr = transaction.transaction(self.ui.warn, self.sopener,
603 self.sjoin("journal"),
603 self.sjoin("journal"),
604 aftertrans(renames),
604 aftertrans(renames),
605 self.store.createmode)
605 self.store.createmode)
606 self._transref = weakref.ref(tr)
606 self._transref = weakref.ref(tr)
607 return tr
607 return tr
608
608
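The aftertrans(renames) callback built above is what turns the journal files into undo files once the transaction closes cleanly, which is exactly what rollback() later looks for. A plausible sketch of that helper, assuming it simply renames each source file that exists (the real helper lives elsewhere in this module):

    import os, tempfile

    def aftertrans(renames):
        def after():
            for src, dst in renames:
                if os.path.exists(src):
                    os.rename(src, dst)  # journal -> undo
        return after

    d = tempfile.mkdtemp()
    open(os.path.join(d, 'journal'), 'w').close()
    aftertrans([(os.path.join(d, 'journal'), os.path.join(d, 'undo'))])()
    assert os.path.exists(os.path.join(d, 'undo'))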
609 def recover(self):
609 def recover(self):
610 lock = self.lock()
610 lock = self.lock()
611 try:
611 try:
612 if os.path.exists(self.sjoin("journal")):
612 if os.path.exists(self.sjoin("journal")):
613 self.ui.status(_("rolling back interrupted transaction\n"))
613 self.ui.status(_("rolling back interrupted transaction\n"))
614 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
614 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
615 self.invalidate()
615 self.invalidate()
616 return True
616 return True
617 else:
617 else:
618 self.ui.warn(_("no interrupted transaction available\n"))
618 self.ui.warn(_("no interrupted transaction available\n"))
619 return False
619 return False
620 finally:
620 finally:
621 lock.release()
621 lock.release()
622
622
623 def rollback(self):
623 def rollback(self):
624 wlock = lock = None
624 wlock = lock = None
625 try:
625 try:
626 wlock = self.wlock()
626 wlock = self.wlock()
627 lock = self.lock()
627 lock = self.lock()
628 if os.path.exists(self.sjoin("undo")):
628 if os.path.exists(self.sjoin("undo")):
629 self.ui.status(_("rolling back last transaction\n"))
629 self.ui.status(_("rolling back last transaction\n"))
630 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
630 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
631 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
631 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
632 try:
632 try:
633 branch = self.opener("undo.branch").read()
633 branch = self.opener("undo.branch").read()
634 self.dirstate.setbranch(branch)
634 self.dirstate.setbranch(branch)
635 except IOError:
635 except IOError:
636 self.ui.warn(_("Named branch could not be reset, "
636 self.ui.warn(_("Named branch could not be reset, "
637 "current branch still is: %s\n")
637 "current branch still is: %s\n")
638 % encoding.tolocal(self.dirstate.branch()))
638 % encoding.tolocal(self.dirstate.branch()))
639 self.invalidate()
639 self.invalidate()
640 self.dirstate.invalidate()
640 self.dirstate.invalidate()
641 else:
641 else:
642 self.ui.warn(_("no rollback information available\n"))
642 self.ui.warn(_("no rollback information available\n"))
643 finally:
643 finally:
644 release(lock, wlock)
644 release(lock, wlock)
645
645
646 def invalidate(self):
646 def invalidate(self):
647 for a in "changelog manifest".split():
647 for a in "changelog manifest".split():
648 if a in self.__dict__:
648 if a in self.__dict__:
649 delattr(self, a)
649 delattr(self, a)
650 self.tagscache = None
650 self.tagscache = None
651 self._tagstypecache = None
651 self._tagstypecache = None
652 self.nodetagscache = None
652 self.nodetagscache = None
653 self.branchcache = None
653 self.branchcache = None
654 self._ubranchcache = None
654 self._ubranchcache = None
655 self._branchcachetip = None
655 self._branchcachetip = None
656
656
657 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
657 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
658 try:
658 try:
659 l = lock.lock(lockname, 0, releasefn, desc=desc)
659 l = lock.lock(lockname, 0, releasefn, desc=desc)
660 except error.LockHeld, inst:
660 except error.LockHeld, inst:
661 if not wait:
661 if not wait:
662 raise
662 raise
663 self.ui.warn(_("waiting for lock on %s held by %r\n") %
663 self.ui.warn(_("waiting for lock on %s held by %r\n") %
664 (desc, inst.locker))
664 (desc, inst.locker))
665 # default to 600 seconds timeout
665 # default to 600 seconds timeout
666 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
666 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
667 releasefn, desc=desc)
667 releasefn, desc=desc)
668 if acquirefn:
668 if acquirefn:
669 acquirefn()
669 acquirefn()
670 return l
670 return l
671
671
672 def lock(self, wait=True):
672 def lock(self, wait=True):
673 l = self._lockref and self._lockref()
673 l = self._lockref and self._lockref()
674 if l is not None and l.held:
674 if l is not None and l.held:
675 l.lock()
675 l.lock()
676 return l
676 return l
677
677
678 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
678 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
679 _('repository %s') % self.origroot)
679 _('repository %s') % self.origroot)
680 self._lockref = weakref.ref(l)
680 self._lockref = weakref.ref(l)
681 return l
681 return l
682
682
683 def wlock(self, wait=True):
683 def wlock(self, wait=True):
684 l = self._wlockref and self._wlockref()
684 l = self._wlockref and self._wlockref()
685 if l is not None and l.held:
685 if l is not None and l.held:
686 l.lock()
686 l.lock()
687 return l
687 return l
688
688
689 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
689 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
690 self.dirstate.invalidate, _('working directory of %s') %
690 self.dirstate.invalidate, _('working directory of %s') %
691 self.origroot)
691 self.origroot)
692 self._wlockref = weakref.ref(l)
692 self._wlockref = weakref.ref(l)
693 return l
693 return l
694
694
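Both lock() and wlock() cache the lock object behind a weak reference: if a live, held lock is still around it is re-acquired (the locks are re-entrant), otherwise a fresh one is taken and the weakref is replaced. A self-contained demonstration with a toy Lock class standing in for mercurial.lock:

    import weakref

    class Lock(object):                  # toy stand-in, not the real lock
        def __init__(self):
            self.held = 1
        def lock(self):
            self.held += 1               # re-entrant acquire

    _lockref = None

    def getlock():
        global _lockref
        l = _lockref and _lockref()
        if l is not None and l.held:
            l.lock()                     # reuse the live lock
            return l
        l = Lock()
        _lockref = weakref.ref(l)        # weakref: dies with its holder
        return l

    a = getlock()
    assert getlock() is a                # second caller reuses it while alive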
695 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
695 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
696 """
696 """
697 commit an individual file as part of a larger transaction
697 commit an individual file as part of a larger transaction
698 """
698 """
699
699
700 fname = fctx.path()
700 fname = fctx.path()
701 text = fctx.data()
701 text = fctx.data()
702 flog = self.file(fname)
702 flog = self.file(fname)
703 fparent1 = manifest1.get(fname, nullid)
703 fparent1 = manifest1.get(fname, nullid)
704 fparent2 = fparent2o = manifest2.get(fname, nullid)
704 fparent2 = fparent2o = manifest2.get(fname, nullid)
705
705
706 meta = {}
706 meta = {}
707 copy = fctx.renamed()
707 copy = fctx.renamed()
708 if copy and copy[0] != fname:
708 if copy and copy[0] != fname:
709 # Mark the new revision of this file as a copy of another
709 # Mark the new revision of this file as a copy of another
710 # file. This copy data will effectively act as a parent
710 # file. This copy data will effectively act as a parent
711 # of this new revision. If this is a merge, the first
711 # of this new revision. If this is a merge, the first
712 # parent will be the nullid (meaning "look up the copy data")
712 # parent will be the nullid (meaning "look up the copy data")
713 # and the second one will be the other parent. For example:
713 # and the second one will be the other parent. For example:
714 #
714 #
715 # 0 --- 1 --- 3 rev1 changes file foo
715 # 0 --- 1 --- 3 rev1 changes file foo
716 # \ / rev2 renames foo to bar and changes it
716 # \ / rev2 renames foo to bar and changes it
717 # \- 2 -/ rev3 should have bar with all changes and
717 # \- 2 -/ rev3 should have bar with all changes and
718 # should record that bar descends from
718 # should record that bar descends from
719 # bar in rev2 and foo in rev1
719 # bar in rev2 and foo in rev1
720 #
720 #
721 # this allows this merge to succeed:
721 # this allows this merge to succeed:
722 #
722 #
723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
724 # \ / merging rev3 and rev4 should use bar@rev2
724 # \ / merging rev3 and rev4 should use bar@rev2
725 # \- 2 --- 4 as the merge base
725 # \- 2 --- 4 as the merge base
726 #
726 #
727
727
728 cfname = copy[0]
728 cfname = copy[0]
729 crev = manifest1.get(cfname)
729 crev = manifest1.get(cfname)
730 newfparent = fparent2
730 newfparent = fparent2
731
731
732 if manifest2: # branch merge
732 if manifest2: # branch merge
733 if fparent2 == nullid or crev is None: # copied on remote side
733 if fparent2 == nullid or crev is None: # copied on remote side
734 if cfname in manifest2:
734 if cfname in manifest2:
735 crev = manifest2[cfname]
735 crev = manifest2[cfname]
736 newfparent = fparent1
736 newfparent = fparent1
737
737
738 # find source in nearest ancestor if we've lost track
738 # find source in nearest ancestor if we've lost track
739 if not crev:
739 if not crev:
740 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
740 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
741 (fname, cfname))
741 (fname, cfname))
742 for ancestor in self['.'].ancestors():
742 for ancestor in self['.'].ancestors():
743 if cfname in ancestor:
743 if cfname in ancestor:
744 crev = ancestor[cfname].filenode()
744 crev = ancestor[cfname].filenode()
745 break
745 break
746
746
747 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
747 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
748 meta["copy"] = cfname
748 meta["copy"] = cfname
749 meta["copyrev"] = hex(crev)
749 meta["copyrev"] = hex(crev)
750 fparent1, fparent2 = nullid, newfparent
750 fparent1, fparent2 = nullid, newfparent
751 elif fparent2 != nullid:
751 elif fparent2 != nullid:
752 # is one parent an ancestor of the other?
752 # is one parent an ancestor of the other?
753 fparentancestor = flog.ancestor(fparent1, fparent2)
753 fparentancestor = flog.ancestor(fparent1, fparent2)
754 if fparentancestor == fparent1:
754 if fparentancestor == fparent1:
755 fparent1, fparent2 = fparent2, nullid
755 fparent1, fparent2 = fparent2, nullid
756 elif fparentancestor == fparent2:
756 elif fparentancestor == fparent2:
757 fparent2 = nullid
757 fparent2 = nullid
758
758
759 # is the file changed?
759 # is the file changed?
760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
761 changelist.append(fname)
761 changelist.append(fname)
762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
763
763
764 # are just the flags changed during merge?
764 # are just the flags changed during merge?
765 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
765 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
766 changelist.append(fname)
766 changelist.append(fname)
767
767
768 return fparent1
768 return fparent1
769
769
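The closing elif above folds redundant file parents: when one filelog parent is an ancestor of the other, only the descendant carries information and the ancestor is dropped to nullid. The same rule in isolation (NULLID and the byte-string nodes are illustrative):

    NULLID = b"\0" * 20

    def collapse_parents(fp1, fp2, ancestor):
        if ancestor == fp1:
            return fp2, NULLID           # fp1 is the ancestor: drop it
        if ancestor == fp2:
            return fp1, NULLID           # fp2 is the ancestor: drop it
        return fp1, fp2                  # a genuine merge: keep both

    assert collapse_parents(b'old', b'new', b'old') == (b'new', NULLID)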
770 def commit(self, files=None, text="", user=None, date=None, match=None,
770 def commit(self, files=None, text="", user=None, date=None, match=None,
771 force=False, editor=False, extra={}):
771 force=False, editor=False, extra={}):
772 """Add a new revision to current repository.
772 """Add a new revision to current repository.
773
773
774 Revision information is gathered from the working directory; files and
774 Revision information is gathered from the working directory; files and
775 match can be used to filter the committed files.
775 match can be used to filter the committed files.
776 If editor is supplied, it is called to get a commit message.
776 If editor is supplied, it is called to get a commit message.
777 """
777 """
778 wlock = self.wlock()
778 wlock = self.wlock()
779 try:
779 try:
780 p1, p2 = self.dirstate.parents()
780 p1, p2 = self.dirstate.parents()
781
781
782 if (not force and p2 != nullid and match and
782 if (not force and p2 != nullid and match and
783 (match.files() or match.anypats())):
783 (match.files() or match.anypats())):
784 raise util.Abort(_('cannot partially commit a merge '
784 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
785 '(do not specify files or patterns)'))
786
786
787 if files:
787 if files:
788 modified, removed = [], []
788 modified, removed = [], []
789 for f in sorted(set(files)):
789 for f in sorted(set(files)):
790 s = self.dirstate[f]
790 s = self.dirstate[f]
791 if s in 'nma':
791 if s in 'nma':
792 modified.append(f)
792 modified.append(f)
793 elif s == 'r':
793 elif s == 'r':
794 removed.append(f)
794 removed.append(f)
795 else:
795 else:
796 self.ui.warn(_("%s not tracked!\n") % f)
796 self.ui.warn(_("%s not tracked!\n") % f)
797 changes = [modified, [], removed, [], []]
797 changes = [modified, [], removed, [], []]
798 else:
798 else:
799 changes = self.status(match=match)
799 changes = self.status(match=match)
800
800
801 if (not force and not extra.get("close") and p2 == nullid
801 if (not force and not extra.get("close") and p2 == nullid
802 and not (changes[0] or changes[1] or changes[2])
802 and not (changes[0] or changes[1] or changes[2])
803 and self[None].branch() == self['.'].branch()):
803 and self[None].branch() == self['.'].branch()):
804 self.ui.status(_("nothing changed\n"))
804 self.ui.status(_("nothing changed\n"))
805 return None
805 return None
806
806
807 ms = merge_.mergestate(self)
807 ms = merge_.mergestate(self)
808 for f in changes[0]:
808 for f in changes[0]:
809 if f in ms and ms[f] == 'u':
809 if f in ms and ms[f] == 'u':
810 raise util.Abort(_("unresolved merge conflicts "
810 raise util.Abort(_("unresolved merge conflicts "
811 "(see hg resolve)"))
811 "(see hg resolve)"))
812
812
813 wctx = context.workingctx(self, (p1, p2), text, user, date,
813 wctx = context.workingctx(self, (p1, p2), text, user, date,
814 extra, changes)
814 extra, changes)
815 if editor:
815 if editor:
816 wctx._text = editor(self, wctx,
816 wctx._text = editor(self, wctx,
817 changes[1], changes[0], changes[2])
817 changes[1], changes[0], changes[2])
818 ret = self.commitctx(wctx, True)
818 ret = self.commitctx(wctx, True)
819
819
820 # update dirstate and mergestate
820 # update dirstate and mergestate
821 for f in changes[0] + changes[1]:
821 for f in changes[0] + changes[1]:
822 self.dirstate.normal(f)
822 self.dirstate.normal(f)
823 for f in changes[2]:
823 for f in changes[2]:
824 self.dirstate.forget(f)
824 self.dirstate.forget(f)
825 self.dirstate.setparents(ret)
825 self.dirstate.setparents(ret)
826 ms.reset()
826 ms.reset()
827
827
828 return ret
828 return ret
829
829
830 finally:
830 finally:
831 wlock.release()
831 wlock.release()
832
832
833 def commitctx(self, ctx, error=False):
833 def commitctx(self, ctx, error=False):
834 """Add a new revision to current repository.
834 """Add a new revision to current repository.
835
835
836 Revision information is passed via the context argument.
836 Revision information is passed via the context argument.
837 """
837 """
838
838
839 tr = lock = None
839 tr = lock = None
840 removed = ctx.removed()
840 removed = ctx.removed()
841 p1, p2 = ctx.p1(), ctx.p2()
841 p1, p2 = ctx.p1(), ctx.p2()
842 m1 = p1.manifest().copy()
842 m1 = p1.manifest().copy()
843 m2 = p2.manifest()
843 m2 = p2.manifest()
844 user = ctx.user()
844 user = ctx.user()
845
845
846 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
846 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
847 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
847 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
848
848
849 lock = self.lock()
849 lock = self.lock()
850 try:
850 try:
851 tr = self.transaction()
851 tr = self.transaction()
852 trp = weakref.proxy(tr)
852 trp = weakref.proxy(tr)
853
853
854 # check in files
854 # check in files
855 new = {}
855 new = {}
856 changed = []
856 changed = []
857 linkrev = len(self)
857 linkrev = len(self)
858 for f in sorted(ctx.modified() + ctx.added()):
858 for f in sorted(ctx.modified() + ctx.added()):
859 self.ui.note(f + "\n")
859 self.ui.note(f + "\n")
860 try:
860 try:
861 fctx = ctx[f]
861 fctx = ctx[f]
862 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
862 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
863 changed)
863 changed)
864 m1.set(f, fctx.flags())
864 m1.set(f, fctx.flags())
865 except (OSError, IOError):
865 except (OSError, IOError):
866 if error:
866 if error:
867 self.ui.warn(_("trouble committing %s!\n") % f)
867 self.ui.warn(_("trouble committing %s!\n") % f)
868 raise
868 raise
869 else:
869 else:
870 removed.append(f)
870 removed.append(f)
871
871
872 # update manifest
872 # update manifest
873 m1.update(new)
873 m1.update(new)
874 removed = [f for f in sorted(removed) if f in m1 or f in m2]
874 removed = [f for f in sorted(removed) if f in m1 or f in m2]
875 drop = [f for f in removed if f in m1]
875 drop = [f for f in removed if f in m1]
876 for f in drop:
876 for f in drop:
877 del m1[f]
877 del m1[f]
878 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
878 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
879 p2.manifestnode(), (new, drop))
879 p2.manifestnode(), (new, drop))
880
880
881 # update changelog
881 # update changelog
882 self.changelog.delayupdate()
882 self.changelog.delayupdate()
883 n = self.changelog.add(mn, changed + removed, ctx.description(),
883 n = self.changelog.add(mn, changed + removed, ctx.description(),
884 trp, p1.node(), p2.node(),
884 trp, p1.node(), p2.node(),
885 user, ctx.date(), ctx.extra().copy())
885 user, ctx.date(), ctx.extra().copy())
886 p = lambda: self.changelog.writepending() and self.root or ""
886 p = lambda: self.changelog.writepending() and self.root or ""
887 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
887 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
888 parent2=xp2, pending=p)
888 parent2=xp2, pending=p)
889 self.changelog.finalize(trp)
889 self.changelog.finalize(trp)
890 tr.close()
890 tr.close()
891
891
892 if self.branchcache:
892 if self.branchcache:
893 self.branchtags()
893 self.branchtags()
894
894
895 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
895 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
896 return n
896 return n
897 finally:
897 finally:
898 del tr
898 del tr
899 lock.release()
899 lock.release()
900
900
901 def walk(self, match, node=None):
901 def walk(self, match, node=None):
902 '''
902 '''
903 walk recursively through the directory tree or a given
903 walk recursively through the directory tree or a given
904 changeset, finding all files matched by the match
904 changeset, finding all files matched by the match
905 function
905 function
906 '''
906 '''
907 return self[node].walk(match)
907 return self[node].walk(match)
908
908
909 def status(self, node1='.', node2=None, match=None,
909 def status(self, node1='.', node2=None, match=None,
910 ignored=False, clean=False, unknown=False):
910 ignored=False, clean=False, unknown=False):
911 """return status of files between two nodes or node and working directory
911 """return status of files between two nodes or node and working directory
912
912
913 If node1 is None, use the first dirstate parent instead.
913 If node1 is None, use the first dirstate parent instead.
914 If node2 is None, compare node1 with working directory.
914 If node2 is None, compare node1 with working directory.
915 """
915 """
916
916
917 def mfmatches(ctx):
917 def mfmatches(ctx):
918 mf = ctx.manifest().copy()
918 mf = ctx.manifest().copy()
919 for fn in mf.keys():
919 for fn in mf.keys():
920 if not match(fn):
920 if not match(fn):
921 del mf[fn]
921 del mf[fn]
922 return mf
922 return mf
923
923
924 if isinstance(node1, context.changectx):
924 if isinstance(node1, context.changectx):
925 ctx1 = node1
925 ctx1 = node1
926 else:
926 else:
927 ctx1 = self[node1]
927 ctx1 = self[node1]
928 if isinstance(node2, context.changectx):
928 if isinstance(node2, context.changectx):
929 ctx2 = node2
929 ctx2 = node2
930 else:
930 else:
931 ctx2 = self[node2]
931 ctx2 = self[node2]
932
932
933 working = ctx2.rev() is None
933 working = ctx2.rev() is None
934 parentworking = working and ctx1 == self['.']
934 parentworking = working and ctx1 == self['.']
935 match = match or match_.always(self.root, self.getcwd())
935 match = match or match_.always(self.root, self.getcwd())
936 listignored, listclean, listunknown = ignored, clean, unknown
936 listignored, listclean, listunknown = ignored, clean, unknown
937
937
938 # load earliest manifest first for caching reasons
938 # load earliest manifest first for caching reasons
939 if not working and ctx2.rev() < ctx1.rev():
939 if not working and ctx2.rev() < ctx1.rev():
940 ctx2.manifest()
940 ctx2.manifest()
941
941
942 if not parentworking:
942 if not parentworking:
943 def bad(f, msg):
943 def bad(f, msg):
944 if f not in ctx1:
944 if f not in ctx1:
945 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
945 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
946 return False
946 return False
947 match.bad = bad
947 match.bad = bad
948
948
949 if working: # we need to scan the working dir
949 if working: # we need to scan the working dir
950 s = self.dirstate.status(match, listignored, listclean, listunknown)
950 s = self.dirstate.status(match, listignored, listclean, listunknown)
951 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
951 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
952
952
953 # check for any possibly clean files
953 # check for any possibly clean files
954 if parentworking and cmp:
954 if parentworking and cmp:
955 fixup = []
955 fixup = []
956 # do a full compare of any files that might have changed
956 # do a full compare of any files that might have changed
957 for f in sorted(cmp):
957 for f in sorted(cmp):
958 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
958 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
959 or ctx1[f].cmp(ctx2[f].data())):
959 or ctx1[f].cmp(ctx2[f].data())):
960 modified.append(f)
960 modified.append(f)
961 else:
961 else:
962 fixup.append(f)
962 fixup.append(f)
963
963
964 if listclean:
964 if listclean:
965 clean += fixup
965 clean += fixup
966
966
967 # update dirstate for files that are actually clean
967 # update dirstate for files that are actually clean
968 if fixup:
968 if fixup:
969 wlock = None
969 wlock = None
970 try:
970 try:
971 try:
971 try:
972 # updating the dirstate is optional
972 # updating the dirstate is optional
973 # so we don't wait on the lock
973 # so we don't wait on the lock
974 wlock = self.wlock(False)
974 wlock = self.wlock(False)
975 for f in fixup:
975 for f in fixup:
976 self.dirstate.normal(f)
976 self.dirstate.normal(f)
977 except error.LockError:
977 except error.LockError:
978 pass
978 pass
979 finally:
979 finally:
980 release(wlock)
980 release(wlock)
981
981
982 if not parentworking:
982 if not parentworking:
983 mf1 = mfmatches(ctx1)
983 mf1 = mfmatches(ctx1)
984 if working:
984 if working:
985 # we are comparing working dir against non-parent
985 # we are comparing working dir against non-parent
986 # generate a pseudo-manifest for the working dir
986 # generate a pseudo-manifest for the working dir
987 mf2 = mfmatches(self['.'])
987 mf2 = mfmatches(self['.'])
988 for f in cmp + modified + added:
988 for f in cmp + modified + added:
989 mf2[f] = None
989 mf2[f] = None
990 mf2.set(f, ctx2.flags(f))
990 mf2.set(f, ctx2.flags(f))
991 for f in removed:
991 for f in removed:
992 if f in mf2:
992 if f in mf2:
993 del mf2[f]
993 del mf2[f]
994 else:
994 else:
995 # we are comparing two revisions
995 # we are comparing two revisions
996 deleted, unknown, ignored = [], [], []
996 deleted, unknown, ignored = [], [], []
997 mf2 = mfmatches(ctx2)
997 mf2 = mfmatches(ctx2)
998
998
999 modified, added, clean = [], [], []
999 modified, added, clean = [], [], []
1000 for fn in mf2:
1000 for fn in mf2:
1001 if fn in mf1:
1001 if fn in mf1:
1002 if (mf1.flags(fn) != mf2.flags(fn) or
1002 if (mf1.flags(fn) != mf2.flags(fn) or
1003 (mf1[fn] != mf2[fn] and
1003 (mf1[fn] != mf2[fn] and
1004 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1004 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1005 modified.append(fn)
1005 modified.append(fn)
1006 elif listclean:
1006 elif listclean:
1007 clean.append(fn)
1007 clean.append(fn)
1008 del mf1[fn]
1008 del mf1[fn]
1009 else:
1009 else:
1010 added.append(fn)
1010 added.append(fn)
1011 removed = mf1.keys()
1011 removed = mf1.keys()
1012
1012
1013 r = modified, added, removed, deleted, unknown, ignored, clean
1013 r = modified, added, removed, deleted, unknown, ignored, clean
1014 [l.sort() for l in r]
1014 [l.sort() for l in r]
1015 return r
1015 return r
1016
1016
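The 'fixup' pass in status() deserves a note: files whose size and date alone cannot prove them clean are content-compared, real changes go to modified, and the provably clean rest is written back with dirstate.normal() under a non-blocking wlock. The classification step on its own (the changed predicate is a stand-in for the flag and content comparison above):

    def split_possibly_clean(candidates, changed):
        modified, fixup = [], []
        for f in sorted(candidates):
            if changed(f):
                modified.append(f)       # really modified
            else:
                fixup.append(f)          # clean: dirstate can be fixed up
        return modified, fixup

    assert split_possibly_clean({'a', 'b'},
                                changed={'a'}.__contains__) == (['a'], ['b'])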
1017 def add(self, list):
1017 def add(self, list):
1018 wlock = self.wlock()
1018 wlock = self.wlock()
1019 try:
1019 try:
1020 rejected = []
1020 rejected = []
1021 for f in list:
1021 for f in list:
1022 p = self.wjoin(f)
1022 p = self.wjoin(f)
1023 try:
1023 try:
1024 st = os.lstat(p)
1024 st = os.lstat(p)
1025 except:
1025 except:
1026 self.ui.warn(_("%s does not exist!\n") % f)
1026 self.ui.warn(_("%s does not exist!\n") % f)
1027 rejected.append(f)
1027 rejected.append(f)
1028 continue
1028 continue
1029 if st.st_size > 10000000:
1029 if st.st_size > 10000000:
1030 self.ui.warn(_("%s: files over 10MB may cause memory and"
1030 self.ui.warn(_("%s: files over 10MB may cause memory and"
1031 " performance problems\n"
1031 " performance problems\n"
1032 "(use 'hg revert %s' to unadd the file)\n")
1032 "(use 'hg revert %s' to unadd the file)\n")
1033 % (f, f))
1033 % (f, f))
1034 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1034 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1035 self.ui.warn(_("%s not added: only files and symlinks "
1035 self.ui.warn(_("%s not added: only files and symlinks "
1036 "supported currently\n") % f)
1036 "supported currently\n") % f)
1037 rejected.append(p)
1037 rejected.append(p)
1038 elif self.dirstate[f] in 'amn':
1038 elif self.dirstate[f] in 'amn':
1039 self.ui.warn(_("%s already tracked!\n") % f)
1039 self.ui.warn(_("%s already tracked!\n") % f)
1040 elif self.dirstate[f] == 'r':
1040 elif self.dirstate[f] == 'r':
1041 self.dirstate.normallookup(f)
1041 self.dirstate.normallookup(f)
1042 else:
1042 else:
1043 self.dirstate.add(f)
1043 self.dirstate.add(f)
1044 return rejected
1044 return rejected
1045 finally:
1045 finally:
1046 wlock.release()
1046 wlock.release()
1047
1047
1048 def forget(self, list):
1048 def forget(self, list):
1049 wlock = self.wlock()
1049 wlock = self.wlock()
1050 try:
1050 try:
1051 for f in list:
1051 for f in list:
1052 if self.dirstate[f] != 'a':
1052 if self.dirstate[f] != 'a':
1053 self.ui.warn(_("%s not added!\n") % f)
1053 self.ui.warn(_("%s not added!\n") % f)
1054 else:
1054 else:
1055 self.dirstate.forget(f)
1055 self.dirstate.forget(f)
1056 finally:
1056 finally:
1057 wlock.release()
1057 wlock.release()
1058
1058
1059 def remove(self, list, unlink=False):
1059 def remove(self, list, unlink=False):
1060 wlock = None
1060 wlock = None
1061 try:
1061 try:
1062 if unlink:
1062 if unlink:
1063 for f in list:
1063 for f in list:
1064 try:
1064 try:
1065 util.unlink(self.wjoin(f))
1065 util.unlink(self.wjoin(f))
1066 except OSError, inst:
1066 except OSError, inst:
1067 if inst.errno != errno.ENOENT:
1067 if inst.errno != errno.ENOENT:
1068 raise
1068 raise
1069 wlock = self.wlock()
1069 wlock = self.wlock()
1070 for f in list:
1070 for f in list:
1071 if unlink and os.path.exists(self.wjoin(f)):
1071 if unlink and os.path.exists(self.wjoin(f)):
1072 self.ui.warn(_("%s still exists!\n") % f)
1072 self.ui.warn(_("%s still exists!\n") % f)
1073 elif self.dirstate[f] == 'a':
1073 elif self.dirstate[f] == 'a':
1074 self.dirstate.forget(f)
1074 self.dirstate.forget(f)
1075 elif f not in self.dirstate:
1075 elif f not in self.dirstate:
1076 self.ui.warn(_("%s not tracked!\n") % f)
1076 self.ui.warn(_("%s not tracked!\n") % f)
1077 else:
1077 else:
1078 self.dirstate.remove(f)
1078 self.dirstate.remove(f)
1079 finally:
1079 finally:
1080 release(wlock)
1080 release(wlock)
1081
1081
1082 def undelete(self, list):
1082 def undelete(self, list):
1083 manifests = [self.manifest.read(self.changelog.read(p)[0])
1083 manifests = [self.manifest.read(self.changelog.read(p)[0])
1084 for p in self.dirstate.parents() if p != nullid]
1084 for p in self.dirstate.parents() if p != nullid]
1085 wlock = self.wlock()
1085 wlock = self.wlock()
1086 try:
1086 try:
1087 for f in list:
1087 for f in list:
1088 if self.dirstate[f] != 'r':
1088 if self.dirstate[f] != 'r':
1089 self.ui.warn(_("%s not removed!\n") % f)
1089 self.ui.warn(_("%s not removed!\n") % f)
1090 else:
1090 else:
1091 m = f in manifests[0] and manifests[0] or manifests[1]
1091 m = f in manifests[0] and manifests[0] or manifests[1]
1092 t = self.file(f).read(m[f])
1092 t = self.file(f).read(m[f])
1093 self.wwrite(f, t, m.flags(f))
1093 self.wwrite(f, t, m.flags(f))
1094 self.dirstate.normal(f)
1094 self.dirstate.normal(f)
1095 finally:
1095 finally:
1096 wlock.release()
1096 wlock.release()
1097
1097
1098 def copy(self, source, dest):
1098 def copy(self, source, dest):
1099 p = self.wjoin(dest)
1099 p = self.wjoin(dest)
1100 if not (os.path.exists(p) or os.path.islink(p)):
1100 if not (os.path.exists(p) or os.path.islink(p)):
1101 self.ui.warn(_("%s does not exist!\n") % dest)
1101 self.ui.warn(_("%s does not exist!\n") % dest)
1102 elif not (os.path.isfile(p) or os.path.islink(p)):
1102 elif not (os.path.isfile(p) or os.path.islink(p)):
1103 self.ui.warn(_("copy failed: %s is not a file or a "
1103 self.ui.warn(_("copy failed: %s is not a file or a "
1104 "symbolic link\n") % dest)
1104 "symbolic link\n") % dest)
1105 else:
1105 else:
1106 wlock = self.wlock()
1106 wlock = self.wlock()
1107 try:
1107 try:
1108 if self.dirstate[dest] in '?r':
1108 if self.dirstate[dest] in '?r':
1109 self.dirstate.add(dest)
1109 self.dirstate.add(dest)
1110 self.dirstate.copy(source, dest)
1110 self.dirstate.copy(source, dest)
1111 finally:
1111 finally:
1112 wlock.release()
1112 wlock.release()
1113
1113
1114 def heads(self, start=None, closed=True):
1114 def heads(self, start=None, closed=True):
1115 heads = self.changelog.heads(start)
1115 heads = self.changelog.heads(start)
1116 def display(head):
1116 def display(head):
1117 if closed:
1117 if closed:
1118 return True
1118 return True
1119 extras = self.changelog.read(head)[5]
1119 extras = self.changelog.read(head)[5]
1120 return ('close' not in extras)
1120 return ('close' not in extras)
1121 # sort the output in rev descending order
1121 # sort the output in rev descending order
1122 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1122 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1123 return [n for (r, n) in sorted(heads)]
1123 return [n for (r, n) in sorted(heads)]
1124
1124
1125 def branchheads(self, branch=None, start=None, closed=True):
1125 def branchheads(self, branch=None, start=None, closed=True):
1126 if branch is None:
1126 if branch is None:
1127 branch = self[None].branch()
1127 branch = self[None].branch()
1128 branches = self._branchheads()
1128 branches = self._branchheads()
1129 if branch not in branches:
1129 if branch not in branches:
1130 return []
1130 return []
1131 bheads = branches[branch]
1131 bheads = branches[branch]
1132 # the cache returns heads ordered lowest to highest
1132 # the cache returns heads ordered lowest to highest
1133 bheads.reverse()
1133 bheads.reverse()
1134 if start is not None:
1134 if start is not None:
1135 # filter out the heads that cannot be reached from startrev
1135 # filter out the heads that cannot be reached from startrev
1136 bheads = self.changelog.nodesbetween([start], bheads)[2]
1136 bheads = self.changelog.nodesbetween([start], bheads)[2]
1137 if not closed:
1137 if not closed:
1138 bheads = [h for h in bheads if
1138 bheads = [h for h in bheads if
1139 ('close' not in self.changelog.read(h)[5])]
1139 ('close' not in self.changelog.read(h)[5])]
1140 return bheads
1140 return bheads
1141
1141
1142 def branches(self, nodes):
1142 def branches(self, nodes):
1143 if not nodes:
1143 if not nodes:
1144 nodes = [self.changelog.tip()]
1144 nodes = [self.changelog.tip()]
1145 b = []
1145 b = []
1146 for n in nodes:
1146 for n in nodes:
1147 t = n
1147 t = n
1148 while 1:
1148 while 1:
1149 p = self.changelog.parents(n)
1149 p = self.changelog.parents(n)
1150 if p[1] != nullid or p[0] == nullid:
1150 if p[1] != nullid or p[0] == nullid:
1151 b.append((t, n, p[0], p[1]))
1151 b.append((t, n, p[0], p[1]))
1152 break
1152 break
1153 n = p[0]
1153 n = p[0]
1154 return b
1154 return b
1155
1155
1156 def between(self, pairs):
1156 def between(self, pairs):
1157 r = []
1157 r = []
1158
1158
1159 for top, bottom in pairs:
1159 for top, bottom in pairs:
1160 n, l, i = top, [], 0
1160 n, l, i = top, [], 0
1161 f = 1
1161 f = 1
1162
1162
1163 while n != bottom and n != nullid:
1163 while n != bottom and n != nullid:
1164 p = self.changelog.parents(n)[0]
1164 p = self.changelog.parents(n)[0]
1165 if i == f:
1165 if i == f:
1166 l.append(n)
1166 l.append(n)
1167 f = f * 2
1167 f = f * 2
1168 n = p
1168 n = p
1169 i += 1
1169 i += 1
1170
1170
1171 r.append(l)
1171 r.append(l)
1172
1172
1173 return r
1173 return r
1174
1174
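between() samples each linear segment at exponentially growing distances from the top (1, 2, 4, ... first-parent steps), so the discovery code can binary-search a segment of n changesets with O(log n) probes. The walk rewritten over a plain parent function (None plays the role of nullid in this sketch):

    def sample_linear(parent, top, bottom):
        n, out, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            if i == f:
                out.append(n)            # n is exactly f steps below top
                f *= 2
            n = parent(n)
            i += 1
        return out

    # a chain 10 -> 9 -> ... -> 0: keep the nodes 1, 2, 4 and 8 steps down
    parent = lambda n: n - 1 if n > 0 else None
    assert sample_linear(parent, 10, 0) == [9, 8, 6, 2]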
1175 def findincoming(self, remote, base=None, heads=None, force=False):
1175 def findincoming(self, remote, base=None, heads=None, force=False):
1176 """Return list of roots of the subsets of missing nodes from remote
1176 """Return list of roots of the subsets of missing nodes from remote
1177
1177
1178 If base dict is specified, assume that these nodes and their parents
1178 If base dict is specified, assume that these nodes and their parents
1179 exist on the remote side and that no child of a node of base exists
1179 exist on the remote side and that no child of a node of base exists
1180 in both remote and self.
1180 in both remote and self.
1181 Furthermore, base will be updated to include the nodes that exist
1181 Furthermore, base will be updated to include the nodes that exist
1182 in both self and remote but none of whose children do.
1182 in both self and remote but none of whose children do.
1183 If a list of heads is specified, return only nodes which are heads
1183 If a list of heads is specified, return only nodes which are heads
1184 or ancestors of these heads.
1184 or ancestors of these heads.
1185
1185
1186 All the ancestors of base are in self and in remote.
1186 All the ancestors of base are in self and in remote.
1187 All the descendants of the list returned are missing in self.
1187 All the descendants of the list returned are missing in self.
1188 (and so we know that the rest of the nodes are missing in remote, see
1188 (and so we know that the rest of the nodes are missing in remote, see
1189 outgoing)
1189 outgoing)
1190 """
1190 """
1191 return self.findcommonincoming(remote, base, heads, force)[1]
1191 return self.findcommonincoming(remote, base, heads, force)[1]
1192
1192
1193 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1193 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1194 """Return a tuple (common, missing roots, heads) used to identify
1194 """Return a tuple (common, missing roots, heads) used to identify
1195 missing nodes from remote.
1195 missing nodes from remote.
1196
1196
1197 If base dict is specified, assume that these nodes and their parents
1197 If base dict is specified, assume that these nodes and their parents
1198 exist on the remote side and that no child of a node of base exists
1198 exist on the remote side and that no child of a node of base exists
1199 in both remote and self.
1199 in both remote and self.
1200 Furthermore, base will be updated to include the nodes that exist
1200 Furthermore, base will be updated to include the nodes that exist
1201 in both self and remote but none of whose children do.
1201 in both self and remote but none of whose children do.
1202 If a list of heads is specified, return only nodes which are heads
1202 If a list of heads is specified, return only nodes which are heads
1203 or ancestors of these heads.
1203 or ancestors of these heads.
1204
1204
1205 All the ancestors of base are in self and in remote.
1205 All the ancestors of base are in self and in remote.
1206 """
1206 """
1207 m = self.changelog.nodemap
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
-        if base == None:
+        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

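The loop above narrows each (unknown head, known root) pair by probing
samples the remote takes at exponentially growing distances, so every round
of remote.between() shrinks the candidate range. A minimal standalone sketch
of one narrowing step; narrow(), the samples list and the known() predicate
are hypothetical stand-ins, not the wire API:

def narrow(head, samples, known):
    # head is missing locally; samples[k] lies roughly 2**k revisions below
    # it, with the branch root appended last (as l.append(n[1]) does above)
    p, f = head, 1
    for i in samples:
        if known(i):
            if f <= 2:
                return 'fetch', p     # boundary pinned: p is the earliest
                                      # unknown changeset on this branch
            return 'search', (p, i)   # keep bisecting between p and i
        p, f = i, f * 2
    return 'fetch', p

assert narrow('h', ['a', 'b', 'c', 'd'], lambda x: x == 'd') == ('search', ('c', 'd'))
assert narrow('h', ['a', 'b', 'c', 'd'], lambda x: x != 'a') == ('fetch', 'a')
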
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
-        if base == None:
+        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

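The pruning above is easy to see on a toy graph: drop everything reachable
from the common nodes, and the roots of what remains are exactly the
changesets the remote side is missing. A self-contained illustration with
made-up nodes (the real code walks self.changelog.parents instead):

parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}
remain = set(parents)
remove = ['b']                      # nodes the remote already has
while remove:
    n = remove.pop()
    if n in remain:
        remain.remove(n)
        remove.extend(parents[n])   # parents of known nodes are known too
roots = [n for n in remain
         if not [p for p in parents[n] if p in remain]]
assert sorted(remain) == ['c', 'd'] and sorted(roots) == ['c', 'd']
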
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

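A small sketch of the capability dispatch above; FakeRemote is a made-up
stand-in for a peer, only capable() mirrors the real API:

class FakeRemote(object):
    caps = set(['unbundle', 'lookup'])
    def capable(self, name):
        return name in self.caps

remote = FakeRemote()
strategy = remote.capable('unbundle') and 'push_unbundle' or 'push_addchangegroup'
assert strategy == 'push_unbundle'   # new servers take the race-safe path
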
    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

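A hypothetical walk-through of the head check above: absent --force, the
push is refused whenever it would leave the remote with more heads than it
has now. All values here are invented:

remote_heads = ['r1']        # the server currently has one head
heads = ['l1', 'l2']         # two outgoing heads on our side
newheads = list(heads)
for r in remote_heads:
    descendants = []         # pretend no outgoing head descends from r
    if not descendants:
        newheads.append(r)   # r survives the push as an extra head
warn = len(newheads) > len(remote_heads)
assert warn                  # "push creates new remote heads!"
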
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

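For reference, the extranodes shape described in the docstring above, with
placeholder 20-byte node strings standing in for real binary nodes:

mnode, fnode, clnode = '\x11' * 20, '\x22' * 20, '\x33' * 20
extranodes = {
    1: [(mnode, clnode)],              # the integer 1 keys the manifest
    'foo/bar.txt': [(fnode, clnode)],  # filenames key the file revlogs
}
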
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

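A self-contained sketch of the linkrev filter gennodelst applies above;
FakeLog is invented for illustration, but real revlogs expose the same
iteration, linkrev() and node() trio:

class FakeLog(object):
    # one (linkrev, node) pair per revision, purely illustrative
    entries = [(0, 'n0'), (1, 'n1'), (2, 'n2')]
    def __iter__(self):
        return iter(xrange(len(self.entries)))
    def linkrev(self, r):
        return self.entries[r][0]
    def node(self, r):
        return self.entries[r][1]

revset = set([1, 2])   # changelog revisions the recipient is missing
log = FakeLog()
wanted = [log.node(r) for r in log if log.linkrev(r) in revset]
assert wanted == ['n1', 'n2']
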
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

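The return-value encoding documented above, restated as a tiny helper so
the shifted ranges are easy to see:

def encode_headcount(oldheads, newheads):
    if newheads < oldheads:
        return newheads - oldheads - 1   # -2..-n: heads were removed
    return newheads - oldheads + 1       # 1: same count, 2..n: heads added

assert encode_headcount(2, 2) == 1
assert encode_headcount(2, 4) == 3    # 1 + 2 added heads
assert encode_headcount(4, 2) == -3   # -1 - 2 removed heads
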
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

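A sketch of the stream framing this client parses: a status line, then a
"<files> <bytes>" line, then one "<name>\0<size>\n" header per file, each
followed by exactly <size> raw bytes. The payload below is fabricated:

import StringIO

payload = '0\n2 11\ndata/a.i\x006\nabcdefdata/b.i\x005\nvwxyz'
fp = StringIO.StringIO(payload)
assert int(fp.readline()) == 0                  # 0 means the server said OK
total_files, total_bytes = map(int, fp.readline().split(' ', 1))
for i in xrange(total_files):
    name, size = fp.readline().split('\0', 1)
    blob = fp.read(int(size))                   # exactly <size> bytes follow
    print '%s: %d bytes' % (name, len(blob))
assert total_bytes == 11
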
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

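Usage sketch for aftertrans; the caller shown is hypothetical. Because each
pair is copied into a tuple up front, later mutation of the caller's list
cannot change what eventually gets renamed:

renames = [['journal', 'undo']]
run_later = aftertrans(renames)
renames[0][0] = 'clobbered'
# aftertrans already copied the pair, so when the transaction machinery
# eventually calls run_later(), it still renames 'journal' to 'undo'.
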
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True

@@ -1,1395 +1,1395 b''
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from i18n import _
from node import hex, nullid, short
import base85, cmdutil, mdiff, util, diffhelpers, copies
import cStringIO, email.Parser, os, re, math
import sys, tempfile, zlib

gitre = re.compile('diff --git a/(.*) b/(.*)')

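A quick check of what gitre captures: the old and new paths of a git-style
diff header.

m = gitre.match('diff --git a/mercurial/util.py b/mercurial/util.py')
assert m and m.group(1) == 'mercurial/util.py'
assert m.group(2) == 'mercurial/util.py'
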
class PatchError(Exception):
    pass

class NoHunks(PatchError):
    pass

# helper functions

def copyfile(src, dst, basedir):
    abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
    if os.path.exists(absdst):
        raise util.Abort(_("cannot create %s: destination already exists") %
                         dst)

    dstdir = os.path.dirname(absdst)
    if dstdir and not os.path.isdir(dstdir):
        try:
            os.makedirs(dstdir)
        except IOError:
            raise util.Abort(
                _("cannot create %s: unable to create destination directory")
                % dst)

    util.copyfile(abssrc, absdst)

# public functions

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'(---|\*\*\*)[ \t])', re.MULTILINE)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend+1:].lstrip()
            subject = subject.replace('\n\t', ' ')
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                ignoretext = False

                ui.debug(_('found patch at byte %d\n') % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch'):
                        ui.debug(_('patch generated by hg export\n'))
                        hgpatch = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatch:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[10:])
                    elif line == '---' and gitsendmail:
                        ignoretext = True
                    if not line.startswith('# ') and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except:
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2

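# A hedged usage sketch (the file name is invented): extract() takes any
# readable object and returns the patch in a temporary file together with
# whatever metadata it recovered from the surrounding email.
#
#     fp = open('incoming.patch')
#     tmpname, message, user, date, branch, nodeid, p1, p2 = extract(ui, fp)
#     if tmpname is None:
#         ui.warn('no patch found\n')
#     else:
#         ...  # use it, then os.unlink(tmpname) as the docstring requires
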
GP_PATCH = 1 << 0  # we have to run patch
GP_FILTER = 1 << 1  # there's some copy/rename operation
GP_BINARY = 1 << 2  # there's a binary patch

class patchmeta:
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.lineno = 0
        self.binary = False

    def setmode(self, mode):
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    # Can have a git patch with only metadata, causing patch to complain
    dopatch = 0

    lineno = 0
    for line in lr:
        lineno += 1
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                src, dst = m.group(1, 2)
                gp = patchmeta(dst)
                gp.lineno = lineno
        elif gp:
            if line.startswith('--- '):
                if gp.op in ('COPY', 'RENAME'):
                    dopatch |= GP_FILTER
                gitpatches.append(gp)
                gp = None
                dopatch |= GP_PATCH
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:].rstrip()
            elif line.startswith('rename to '):
                gp.path = line[10:].rstrip()
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:].rstrip()
            elif line.startswith('copy to '):
                gp.path = line[8:].rstrip()
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
                # is the deleted file a symlink?
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('GIT binary patch'):
                dopatch |= GP_BINARY
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    if not gitpatches:
        dopatch = GP_PATCH

    return (dopatch, gitpatches)

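# A minimal sketch of driving readgitpatch by hand (patch text invented);
# linereader is defined further down in this module:
#
#     text = ('diff --git a/foo b/bar\n'
#             'rename from foo\n'
#             'rename to bar\n'
#             '--- a/foo\n')
#     dopatch, gps = readgitpatch(linereader(cStringIO.StringIO(text)))
#     # gps[0].op == 'RENAME', oldpath 'foo', path 'bar';
#     # dopatch has both GP_PATCH and GP_FILTER set
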
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')

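# For example (values invented), the optional length groups explain the
# None-handling in read_unified_hunk below:
#
#     unidesc.match('@@ -1,5 +1,6 @@').groups()
#     # -> ('1', ',5', '5', '1', ',6', '6')
#     unidesc.match('@@ -3 +3 @@').groups()
#     # -> ('3', None, None, '3', None, None)
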
class patchfile:
    def __init__(self, ui, fname, opener, missing=False):
        self.fname = fname
        self.opener = opener
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = missing
        if not missing:
            try:
                self.lines = self.readlines(fname)
                self.exists = True
            except IOError:
                pass
        else:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def readlines(self, fname):
        fp = self.opener(fname, 'r')
        try:
            return fp.readlines()
        finally:
            fp.close()

    def writelines(self, fname, lines):
        fp = self.opener(fname, 'w')
        try:
            fp.writelines(lines)
        finally:
            fp.close()

    def unlink(self, fname):
        os.unlink(fname)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        def sorter(a, b):
            vala = abs(a - linenum)
            valb = abs(b - linenum)
            return cmp(vala, valb)

        try:
            cand = self.hash[l]
        except:
            return []

        if len(cand) > 1:
            # re-sort our list of potentials forward then back.
            cand.sort(sorter)
        return cand

    def hashlines(self):
        self.hash = {}
        for x in xrange(len(self.lines)):
            s = self.lines[x]
            self.hash.setdefault(s, []).append(x)

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.

        if not self.rej:
            return

        fname = self.fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (len(self.rej), self.hunks, fname))

        def rejlines():
            base = os.path.basename(self.fname)
            yield "--- %s\n+++ %s\n" % (base, base)
            for x in self.rej:
                for l in x.hunk:
                    yield l
                    if l[-1] != '\n':
                        yield "\n\ No newline at end of file\n"

        self.writelines(fname, rejlines())

    def write(self, dest=None):
        if not self.dirty:
            return
        if not dest:
            dest = self.fname
        self.writelines(dest, self.lines)

    def close(self):
        self.write()
        self.write_rej()

    def apply(self, h, reverse):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1
        if reverse:
            h.reverse()

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, githunk):
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = 1
            return 0

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        if diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = 1
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hashlines()
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start

        for fuzzlen in xrange(3):
            for toponly in [ True, False ]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.dirty = 1
                        if fuzzlen:
                            fuzzstr = "with fuzz %d " % fuzzlen
                            f = self.ui.warn
                            self.printfile(True)
                        else:
                            fuzzstr = ""
                            f = self.ui.note
                        offset = l - orig_start - fuzzlen
                        if offset == 1:
                            msg = _("Hunk #%d succeeded at %d %s"
                                    "(offset %d line).\n")
                        else:
                            msg = _("Hunk #%d succeeded at %d %s"
                                    "(offset %d lines).\n")
                        f(msg % (h.number, l+1, fuzzstr, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(h)
        return -1

class hunk:
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        self.number = num
        self.desc = desc
        self.hunk = [ desc ]
        self.a = []
        self.b = []
        if context:
            self.read_context_hunk(lr)
        else:
            self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Let's try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki-1] = s
                continue
            if not l:
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # this can happen when the hunk does not add any lines
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki-1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc

    def reverse(self):
        self.create, self.remove = self.remove, self.create
        origlena = self.lena
        origstarta = self.starta
        self.lena = self.lenb
        self.starta = self.startb
        self.lenb = origlena
        self.startb = origstarta
        self.a = []
        self.b = []
        # self.hunk[0] is the @@ description
        for x in xrange(1, len(self.hunk)):
            o = self.hunk[x]
            if o.startswith('-'):
                n = '+' + o[1:]
                self.b.append(o[1:])
            elif o.startswith('+'):
                n = '-' + o[1:]
                self.a.append(n)
            else:
                n = o
                self.b.append(o[1:])
                self.a.append(o)
            self.hunk[x] = n

    def fix_newline(self):
        diffhelpers.fix_newline(self.hunk, self.a, self.b)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen-1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x+1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen-1):
                    if self.hunk[hlen-bot-1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

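# An illustrative note on fuzzit (numbers invented): with three context
# lines at both ends of a hunk, old(fuzz=1) drops one context line from
# each end and old(1, toponly=True) drops it only from the top, mirroring
# how patch(1) relaxes matching as the fuzz factor grows; with less
# context available the adjustment above can leave the list unchanged.
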
    def old(self, fuzz=0, toponly=False):
        return self.fuzzit(self.a, fuzz, toponly)

    def newctrl(self):
        res = []
        for x in self.hunk:
            c = x[0]
            if c == ' ' or c == '+':
                res.append(x)
        return res

    def new(self, fuzz=0, toponly=False):
        return self.fuzzit(self.b, fuzz, toponly)

class githunk(object):
    """A git hunk"""
    def __init__(self, gitpatch):
        self.gitpatch = gitpatch
        self.text = None
        self.hunk = []

    def createfile(self):
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        return self.text is not None

    def new(self):
        return [self.text]

class binhunk(githunk):
    'A binary patch file. Only understands literals so far.'
    def __init__(self, gitpatch):
        super(binhunk, self).__init__(gitpatch)
        self.hunk = ['GIT binary patch\n']

    def extract(self, lr):
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text

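# A note on the decode loop above (standard git binary-patch framing):
# each data line starts with one character giving the decoded byte count
# of that line -- 'A'..'Z' mean 1..26, 'a'..'z' mean 27..52 -- e.g.:
#
#     ord('M') - ord('A') + 1   # -> 13 bytes on a line starting with 'M'
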
class symlinkhunk(githunk):
    """A git symlink hunk"""
    def __init__(self, gitpatch, hunk):
        super(symlinkhunk, self).__init__(gitpatch)
        self.hunk = hunk

    def complete(self):
        return True

    def fix_newline(self):
        return

def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

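# For example (header lines invented):
#
#     parsefilename('--- a/foo.c\t2009-01-01 00:00:00')  # -> 'a/foo.c'
#     parsefilename('+++ b/foo.c')                       # -> 'b/foo.c'
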
def selectfile(afile_orig, bfile_orig, hunk, strip, reverse):
    def pathstrip(path, count=1):
        pathlen = len(path)
        i = 0
        if count == 0:
            return '', path.rstrip()
        while count > 0:
            i = path.find('/', i)
            if i == -1:
                raise PatchError(_("unable to strip away %d dirs from %s") %
                                 (count, path))
            i += 1
            # consume '//' in the path
            while i < pathlen - 1 and path[i] == '/':
                i += 1
            count -= 1
        return path[:i].lstrip(), path[i:].rstrip()

    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and util.lexists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and os.path.exists(bfile)
    createfunc = hunk.createfile
    if reverse:
        createfunc = hunk.rmfile
    missing = not goodb and not gooda and not createfunc()
    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing

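# A quick illustration of the inner pathstrip helper (paths invented),
# which is what the strip level passed down from -p means:
#
#     pathstrip('a/b/foo.c', 1)  # -> ('a/', 'b/foo.c')
#     pathstrip('a/b/foo.c', 2)  # -> ('a/b/', 'foo.c')
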
class linereader:
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        while 1:
            l = self.readline()
            if not l:
                break
            yield l

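# A minimal sketch of the push-back behaviour (input invented):
#
#     lr = linereader(cStringIO.StringIO('one\ntwo\n'))
#     first = lr.readline()   # 'one\n'
#     lr.push(first)          # put it back
#     lr.readline()           # 'one\n' again; 'two\n' on the next call
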
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is: the renamed 'a' could not be
    found, because it would have been renamed already. And we cannot
    copy from 'b' instead because 'b' would have been changed already.
    So we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    (dopatch, gitpatches) = readgitpatch(gitlr)
    fp.seek(pos)
    return dopatch, gitpatches

def iterhunks(ui, fp, sourcefile=None):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    changed = {}
    current_hunk = None
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = False
    git = False

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)
    dopatch = True
    # gitworkdone is True if a git operation (copy, rename, ...) was
    # performed already for the current file. Useful when the file
    # section may have no hunk.
    gitworkdone = False

    while True:
        newfile = False
        x = lr.readline()
        if not x:
            break
        if current_hunk:
            if x.startswith('\ '):
                current_hunk.fix_newline()
            yield 'hunk', current_hunk
            current_hunk = None
            gitworkdone = False
        if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            try:
                if context is None and x.startswith('***************'):
                    context = True
                gpatch = changed.get(bfile)
                create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
                remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
                current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
                if remove:
                    gpatch = changed.get(afile[2:])
                    if gpatch and gpatch.mode[0]:
                        current_hunk = symlinkhunk(gpatch, current_hunk)
            except PatchError, err:
                ui.debug(err)
                current_hunk = None
                continue
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, current_hunk)
        elif state == BFILE and x.startswith('GIT binary patch'):
            current_hunk = binhunk(changed[bfile])
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
            current_hunk.extract(lr)
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    dopatch, gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
                # else error?
                # copy/rename + modify should modify target, not source
                gp = changed.get(bfile)
                if gp and gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD'):
                    afile = bfile
                    gitworkdone = True
                newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            emitfile = True
            state = BFILE
            hunknum = 0
    if current_hunk:
        if current_hunk.complete():
            yield 'hunk', current_hunk
        else:
            raise PatchError(_("malformed patch %s %s") % (afile,
                             current_hunk.desc))

    if hunknum == 0 and dopatch and not gitworkdone:
        raise NoHunks

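# A hedged sketch of consuming the event stream (file name invented);
# applydiff below is the real driver of this generator:
#
#     for state, values in iterhunks(ui, open('some.patch')):
#         if state == 'file':
#             afile, bfile, firsthunk = values
#         elif state == 'hunk':
#             pass  # apply values to the currently selected file
#         elif state == 'git':
#             pass  # values lists patchmeta records for copies/renames
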
def applydiff(ui, fp, changed, strip=1, sourcefile=None, reverse=False):
    """reads a patch from fp and tries to apply it. The dict 'changed' is
    filled in with all of the filenames changed by the patch. Returns 0
    for a clean patch, -1 if any rejects were found and 1 if there was
    any fuzz."""

    rejects = 0
    err = 0
    current_file = None
    gitpatches = None
    opener = util.opener(os.getcwd())

    def closefile():
        if not current_file:
            return 0
        current_file.close()
        return len(current_file.rej)

    for state, values in iterhunks(ui, fp, sourcefile):
        if state == 'hunk':
            if not current_file:
                continue
            current_hunk = values
            ret = current_file.apply(current_hunk, reverse)
            if ret >= 0:
                changed.setdefault(current_file.fname, None)
                if ret > 0:
                    err = 1
        elif state == 'file':
            rejects += closefile()
            afile, bfile, first_hunk = values
            try:
                if sourcefile:
                    current_file = patchfile(ui, sourcefile, opener)
                else:
                    current_file, missing = selectfile(afile, bfile, first_hunk,
                                                       strip, reverse)
                    current_file = patchfile(ui, current_file, opener, missing)
            except PatchError, err:
                ui.warn(str(err) + '\n')
                current_file, current_hunk = None, None
                rejects += 1
                continue
        elif state == 'git':
            gitpatches = values
            cwd = os.getcwd()
            for gp in gitpatches:
                if gp.op in ('COPY', 'RENAME'):
                    copyfile(gp.oldpath, gp.path, cwd)
                changed[gp.path] = gp
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    rejects += closefile()

    if rejects:
        return -1
    return err

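# A hedged usage sketch (patch path invented); the return value follows
# the docstring: 0 = clean, 1 = applied with fuzz, -1 = rejects written.
#
#     changed = {}
#     ret = applydiff(ui, open('fix.patch'), changed, strip=1)
#     if ret < 0:
#         ui.warn('some hunks were rejected\n')
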
def diffopts(ui, opts={}, untrusted=False):
    def get(key, name=None, getter=ui.configbool):
        return (opts.get(key) or
                getter('diff', name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))

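# For example (options invented): explicit command-line opts win over the
# [diff] configuration section because of the 'or' in get():
#
#     dopts = diffopts(ui, {'git': True, 'unified': '5'})
#     # dopts.git is True even if diff.git is unset in the config
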
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)
    for src, dst in copies:
        repo.copy(src, dst)
    if (not similarity) and removes:
        repo.remove(sorted(removes), True)
    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.exists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            elif gp.op != 'DELETE':
                util.set_flags(dst, islink, isexec)
    cmdutil.addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)

1072 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1072 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1073 """use <patcher> to apply <patchname> to the working directory.
1073 """use <patcher> to apply <patchname> to the working directory.
1074 returns whether the patch was applied with a fuzz factor."""
1074 returns whether the patch was applied with a fuzz factor."""
1075
1075
1076 fuzz = False
1076 fuzz = False
1077 if cwd:
1077 if cwd:
1078 args.append('-d %s' % util.shellquote(cwd))
1078 args.append('-d %s' % util.shellquote(cwd))
1079 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1079 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1080 util.shellquote(patchname)))
1080 util.shellquote(patchname)))
1081
1081
1082 for line in fp:
1082 for line in fp:
1083 line = line.rstrip()
1083 line = line.rstrip()
1084 ui.note(line + '\n')
1084 ui.note(line + '\n')
1085 if line.startswith('patching file '):
1085 if line.startswith('patching file '):
1086 pf = util.parse_patch_output(line)
1086 pf = util.parse_patch_output(line)
1087 printed_file = False
1087 printed_file = False
1088 files.setdefault(pf, None)
1088 files.setdefault(pf, None)
1089 elif line.find('with fuzz') >= 0:
1089 elif line.find('with fuzz') >= 0:
1090 fuzz = True
1090 fuzz = True
1091 if not printed_file:
1091 if not printed_file:
1092 ui.warn(pf + '\n')
1092 ui.warn(pf + '\n')
1093 printed_file = True
1093 printed_file = True
1094 ui.warn(line + '\n')
1094 ui.warn(line + '\n')
1095 elif line.find('saving rejects to file') >= 0:
1095 elif line.find('saving rejects to file') >= 0:
1096 ui.warn(line + '\n')
1096 ui.warn(line + '\n')
1097 elif line.find('FAILED') >= 0:
1097 elif line.find('FAILED') >= 0:
1098 if not printed_file:
1098 if not printed_file:
1099 ui.warn(pf + '\n')
1099 ui.warn(pf + '\n')
1100 printed_file = True
1100 printed_file = True
1101 ui.warn(line + '\n')
1101 ui.warn(line + '\n')
1102 code = fp.close()
1102 code = fp.close()
1103 if code:
1103 if code:
1104 raise PatchError(_("patch command failed: %s") %
1104 raise PatchError(_("patch command failed: %s") %
1105 util.explain_exit(code)[0])
1105 util.explain_exit(code)[0])
1106 return fuzz
1106 return fuzz
1107
1107
1108 def internalpatch(patchobj, ui, strip, cwd, files={}):
1108 def internalpatch(patchobj, ui, strip, cwd, files={}):
1109 """use builtin patch to apply <patchobj> to the working directory.
1109 """use builtin patch to apply <patchobj> to the working directory.
1110 returns whether the patch was applied with a fuzz factor."""
1110 returns whether the patch was applied with a fuzz factor."""
1111 try:
1111 try:
1112 fp = file(patchobj, 'rb')
1112 fp = file(patchobj, 'rb')
1113 except TypeError:
1113 except TypeError:
1114 fp = patchobj
1114 fp = patchobj
1115 if cwd:
1115 if cwd:
1116 curdir = os.getcwd()
1116 curdir = os.getcwd()
1117 os.chdir(cwd)
1117 os.chdir(cwd)
1118 try:
1118 try:
1119 ret = applydiff(ui, fp, files, strip=strip)
1119 ret = applydiff(ui, fp, files, strip=strip)
1120 finally:
1120 finally:
1121 if cwd:
1121 if cwd:
1122 os.chdir(curdir)
1122 os.chdir(curdir)
1123 if ret < 0:
1123 if ret < 0:
1124 raise PatchError
1124 raise PatchError
1125 return ret > 0
1125 return ret > 0
1126
1126
1127 def patch(patchname, ui, strip=1, cwd=None, files={}):
1127 def patch(patchname, ui, strip=1, cwd=None, files={}):
1128 """apply <patchname> to the working directory.
1128 """apply <patchname> to the working directory.
1129 returns whether the patch was applied with a fuzz factor."""
1129 returns whether the patch was applied with a fuzz factor."""
1130 patcher = ui.config('ui', 'patch')
1130 patcher = ui.config('ui', 'patch')
1131 args = []
1131 args = []
1132 try:
1132 try:
1133 if patcher:
1133 if patcher:
1134 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1134 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1135 files)
1135 files)
1136 else:
1136 else:
1137 try:
1137 try:
1138 return internalpatch(patchname, ui, strip, cwd, files)
1138 return internalpatch(patchname, ui, strip, cwd, files)
1139 except NoHunks:
1139 except NoHunks:
1140 patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch'
1140 patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch'
1141 ui.debug(_('no valid hunks found; trying with %r instead\n') %
1141 ui.debug(_('no valid hunks found; trying with %r instead\n') %
1142 patcher)
1142 patcher)
1143 if util.needbinarypatch():
1143 if util.needbinarypatch():
1144 args.append('--binary')
1144 args.append('--binary')
1145 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1145 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1146 files)
1146 files)
1147 except PatchError, err:
1147 except PatchError, err:
1148 s = str(err)
1148 s = str(err)
1149 if s:
1149 if s:
1150 raise util.Abort(s)
1150 raise util.Abort(s)
1151 else:
1151 else:
1152 raise util.Abort(_('patch failed to apply'))
1152 raise util.Abort(_('patch failed to apply'))
1153
1153
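A hypothetical call site for the dispatcher above, assuming a ui object, an open repo, and a patch file on disk (the filename is illustrative): the ui.patch config selects an external tool, otherwise the internal patcher is tried first, with gpatch/patch as the fallback when no hunks are recognized.

    fuzz = patch('fix.diff', ui, strip=1, cwd=repo.root)
    if fuzz:
        ui.warn('patch applied with fuzz\n')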
1154 def b85diff(to, tn):
1154 def b85diff(to, tn):
1155 '''print base85-encoded binary diff'''
1155 '''print base85-encoded binary diff'''
1156 def gitindex(text):
1156 def gitindex(text):
1157 if not text:
1157 if not text:
1158 return '0' * 40
1158 return '0' * 40
1159 l = len(text)
1159 l = len(text)
1160 s = util.sha1('blob %d\0' % l)
1160 s = util.sha1('blob %d\0' % l)
1161 s.update(text)
1161 s.update(text)
1162 return s.hexdigest()
1162 return s.hexdigest()
1163
1163
1164 def fmtline(line):
1164 def fmtline(line):
1165 l = len(line)
1165 l = len(line)
1166 if l <= 26:
1166 if l <= 26:
1167 l = chr(ord('A') + l - 1)
1167 l = chr(ord('A') + l - 1)
1168 else:
1168 else:
1169 l = chr(l - 26 + ord('a') - 1)
1169 l = chr(l - 26 + ord('a') - 1)
1170 return '%c%s\n' % (l, base85.b85encode(line, True))
1170 return '%c%s\n' % (l, base85.b85encode(line, True))
1171
1171
1172 def chunk(text, csize=52):
1172 def chunk(text, csize=52):
1173 l = len(text)
1173 l = len(text)
1174 i = 0
1174 i = 0
1175 while i < l:
1175 while i < l:
1176 yield text[i:i+csize]
1176 yield text[i:i+csize]
1177 i += csize
1177 i += csize
1178
1178
1179 tohash = gitindex(to)
1179 tohash = gitindex(to)
1180 tnhash = gitindex(tn)
1180 tnhash = gitindex(tn)
1181 if tohash == tnhash:
1181 if tohash == tnhash:
1182 return ""
1182 return ""
1183
1183
1184 # TODO: deltas
1184 # TODO: deltas
1185 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1185 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1186 (tohash, tnhash, len(tn))]
1186 (tohash, tnhash, len(tn))]
1187 for l in chunk(zlib.compress(tn)):
1187 for l in chunk(zlib.compress(tn)):
1188 ret.append(fmtline(l))
1188 ret.append(fmtline(l))
1189 ret.append('\n')
1189 ret.append('\n')
1190 return ''.join(ret)
1190 return ''.join(ret)
1191
1191
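The gitindex helper above reproduces git's blob naming scheme: SHA-1 over the header 'blob <size>\0' followed by the contents. A standalone sketch using hashlib directly (util.sha1 is hashlib-backed), checked against a well-known blob hash:

    import hashlib

    def gitindex(text):
        # hash 'blob <size>\0' + contents, exactly how git names blobs
        if not text:
            return '0' * 40
        s = hashlib.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    # 'git hash-object' on a file containing "hello\n" prints the same id
    assert gitindex('hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a'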
1192 def _addmodehdr(header, omode, nmode):
1192 def _addmodehdr(header, omode, nmode):
1193 if omode != nmode:
1193 if omode != nmode:
1194 header.append('old mode %s\n' % omode)
1194 header.append('old mode %s\n' % omode)
1195 header.append('new mode %s\n' % nmode)
1195 header.append('new mode %s\n' % nmode)
1196
1196
1197 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None):
1197 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None):
1198 '''yields diff of changes to files between two nodes, or node and
1198 '''yields diff of changes to files between two nodes, or node and
1199 working directory.
1199 working directory.
1200
1200
1201 if node1 is None, use first dirstate parent instead.
1201 if node1 is None, use first dirstate parent instead.
1202 if node2 is None, compare node1 with working directory.'''
1202 if node2 is None, compare node1 with working directory.'''
1203
1203
1204 if opts is None:
1204 if opts is None:
1205 opts = mdiff.defaultopts
1205 opts = mdiff.defaultopts
1206
1206
1207 if not node1:
1207 if not node1:
1208 node1 = repo.dirstate.parents()[0]
1208 node1 = repo.dirstate.parents()[0]
1209
1209
1210 flcache = {}
1210 flcache = {}
1211 def getfilectx(f, ctx):
1211 def getfilectx(f, ctx):
1212 flctx = ctx.filectx(f, filelog=flcache.get(f))
1212 flctx = ctx.filectx(f, filelog=flcache.get(f))
1213 if f not in flcache:
1213 if f not in flcache:
1214 flcache[f] = flctx._filelog
1214 flcache[f] = flctx._filelog
1215 return flctx
1215 return flctx
1216
1216
1217 ctx1 = repo[node1]
1217 ctx1 = repo[node1]
1218 ctx2 = repo[node2]
1218 ctx2 = repo[node2]
1219
1219
1220 if not changes:
1220 if not changes:
1221 changes = repo.status(ctx1, ctx2, match=match)
1221 changes = repo.status(ctx1, ctx2, match=match)
1222 modified, added, removed = changes[:3]
1222 modified, added, removed = changes[:3]
1223
1223
1224 if not modified and not added and not removed:
1224 if not modified and not added and not removed:
1225 return
1225 return
1226
1226
1227 date1 = util.datestr(ctx1.date())
1227 date1 = util.datestr(ctx1.date())
1228 man1 = ctx1.manifest()
1228 man1 = ctx1.manifest()
1229
1229
1230 if repo.ui.quiet:
1230 if repo.ui.quiet:
1231 r = None
1231 r = None
1232 else:
1232 else:
1233 hexfunc = repo.ui.debugflag and hex or short
1233 hexfunc = repo.ui.debugflag and hex or short
1234 r = [hexfunc(node) for node in [node1, node2] if node]
1234 r = [hexfunc(node) for node in [node1, node2] if node]
1235
1235
1236 if opts.git:
1236 if opts.git:
1237 copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid])
1237 copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid])
1238 copy = copy.copy()
1238 copy = copy.copy()
1239 for k, v in copy.items():
1239 for k, v in copy.items():
1240 copy[v] = k
1240 copy[v] = k
1241
1241
1242 gone = set()
1242 gone = set()
1243 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1243 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1244
1244
1245 for f in sorted(modified + added + removed):
1245 for f in sorted(modified + added + removed):
1246 to = None
1246 to = None
1247 tn = None
1247 tn = None
1248 dodiff = True
1248 dodiff = True
1249 header = []
1249 header = []
1250 if f in man1:
1250 if f in man1:
1251 to = getfilectx(f, ctx1).data()
1251 to = getfilectx(f, ctx1).data()
1252 if f not in removed:
1252 if f not in removed:
1253 tn = getfilectx(f, ctx2).data()
1253 tn = getfilectx(f, ctx2).data()
1254 a, b = f, f
1254 a, b = f, f
1255 if opts.git:
1255 if opts.git:
1256 if f in added:
1256 if f in added:
1257 mode = gitmode[ctx2.flags(f)]
1257 mode = gitmode[ctx2.flags(f)]
1258 if f in copy:
1258 if f in copy:
1259 a = copy[f]
1259 a = copy[f]
1260 omode = gitmode[man1.flags(a)]
1260 omode = gitmode[man1.flags(a)]
1261 _addmodehdr(header, omode, mode)
1261 _addmodehdr(header, omode, mode)
1262 if a in removed and a not in gone:
1262 if a in removed and a not in gone:
1263 op = 'rename'
1263 op = 'rename'
1264 gone.add(a)
1264 gone.add(a)
1265 else:
1265 else:
1266 op = 'copy'
1266 op = 'copy'
1267 header.append('%s from %s\n' % (op, a))
1267 header.append('%s from %s\n' % (op, a))
1268 header.append('%s to %s\n' % (op, f))
1268 header.append('%s to %s\n' % (op, f))
1269 to = getfilectx(a, ctx1).data()
1269 to = getfilectx(a, ctx1).data()
1270 else:
1270 else:
1271 header.append('new file mode %s\n' % mode)
1271 header.append('new file mode %s\n' % mode)
1272 if util.binary(tn):
1272 if util.binary(tn):
1273 dodiff = 'binary'
1273 dodiff = 'binary'
1274 elif f in removed:
1274 elif f in removed:
1275 # have we already reported a copy above?
1275 # have we already reported a copy above?
1276 if f in copy and copy[f] in added and copy[copy[f]] == f:
1276 if f in copy and copy[f] in added and copy[copy[f]] == f:
1277 dodiff = False
1277 dodiff = False
1278 else:
1278 else:
1279 header.append('deleted file mode %s\n' %
1279 header.append('deleted file mode %s\n' %
1280 gitmode[man1.flags(f)])
1280 gitmode[man1.flags(f)])
1281 else:
1281 else:
1282 omode = gitmode[man1.flags(f)]
1282 omode = gitmode[man1.flags(f)]
1283 nmode = gitmode[ctx2.flags(f)]
1283 nmode = gitmode[ctx2.flags(f)]
1284 _addmodehdr(header, omode, nmode)
1284 _addmodehdr(header, omode, nmode)
1285 if util.binary(to) or util.binary(tn):
1285 if util.binary(to) or util.binary(tn):
1286 dodiff = 'binary'
1286 dodiff = 'binary'
1287 r = None
1287 r = None
1288 header.insert(0, mdiff.diffline(r, a, b, opts))
1288 header.insert(0, mdiff.diffline(r, a, b, opts))
1289 if dodiff:
1289 if dodiff:
1290 if dodiff == 'binary':
1290 if dodiff == 'binary':
1291 text = b85diff(to, tn)
1291 text = b85diff(to, tn)
1292 else:
1292 else:
1293 text = mdiff.unidiff(to, date1,
1293 text = mdiff.unidiff(to, date1,
1294 # ctx2 date may be dynamic
1294 # ctx2 date may be dynamic
1295 tn, util.datestr(ctx2.date()),
1295 tn, util.datestr(ctx2.date()),
1296 a, b, r, opts=opts)
1296 a, b, r, opts=opts)
1297 if header and (text or len(header) > 1):
1297 if header and (text or len(header) > 1):
1298 yield ''.join(header)
1298 yield ''.join(header)
1299 if text:
1299 if text:
1300 yield text
1300 yield text
1301
1301
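A hypothetical usage sketch for the generator above, assuming an open repo and the module's sys/mdiff imports in scope: with both nodes left at None it streams the diff between the first dirstate parent and the working directory.

    for chunk in diff(repo, node1=None, node2=None, opts=mdiff.defaultopts):
        sys.stdout.write(chunk)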
1302 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1302 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1303 opts=None):
1303 opts=None):
1304 '''export changesets as hg patches.'''
1304 '''export changesets as hg patches.'''
1305
1305
1306 total = len(revs)
1306 total = len(revs)
1307 revwidth = max([len(str(rev)) for rev in revs])
1307 revwidth = max([len(str(rev)) for rev in revs])
1308
1308
1309 def single(rev, seqno, fp):
1309 def single(rev, seqno, fp):
1310 ctx = repo[rev]
1310 ctx = repo[rev]
1311 node = ctx.node()
1311 node = ctx.node()
1312 parents = [p.node() for p in ctx.parents() if p]
1312 parents = [p.node() for p in ctx.parents() if p]
1313 branch = ctx.branch()
1313 branch = ctx.branch()
1314 if switch_parent:
1314 if switch_parent:
1315 parents.reverse()
1315 parents.reverse()
1316 prev = (parents and parents[0]) or nullid
1316 prev = (parents and parents[0]) or nullid
1317
1317
1318 if not fp:
1318 if not fp:
1319 fp = cmdutil.make_file(repo, template, node, total=total,
1319 fp = cmdutil.make_file(repo, template, node, total=total,
1320 seqno=seqno, revwidth=revwidth,
1320 seqno=seqno, revwidth=revwidth,
1321 mode='ab')
1321 mode='ab')
1322 if fp != sys.stdout and hasattr(fp, 'name'):
1322 if fp != sys.stdout and hasattr(fp, 'name'):
1323 repo.ui.note("%s\n" % fp.name)
1323 repo.ui.note("%s\n" % fp.name)
1324
1324
1325 fp.write("# HG changeset patch\n")
1325 fp.write("# HG changeset patch\n")
1326 fp.write("# User %s\n" % ctx.user())
1326 fp.write("# User %s\n" % ctx.user())
1327 fp.write("# Date %d %d\n" % ctx.date())
1327 fp.write("# Date %d %d\n" % ctx.date())
1328 if branch and (branch != 'default'):
1328 if branch and (branch != 'default'):
1329 fp.write("# Branch %s\n" % branch)
1329 fp.write("# Branch %s\n" % branch)
1330 fp.write("# Node ID %s\n" % hex(node))
1330 fp.write("# Node ID %s\n" % hex(node))
1331 fp.write("# Parent %s\n" % hex(prev))
1331 fp.write("# Parent %s\n" % hex(prev))
1332 if len(parents) > 1:
1332 if len(parents) > 1:
1333 fp.write("# Parent %s\n" % hex(parents[1]))
1333 fp.write("# Parent %s\n" % hex(parents[1]))
1334 fp.write(ctx.description().rstrip())
1334 fp.write(ctx.description().rstrip())
1335 fp.write("\n\n")
1335 fp.write("\n\n")
1336
1336
1337 for chunk in diff(repo, prev, node, opts=opts):
1337 for chunk in diff(repo, prev, node, opts=opts):
1338 fp.write(chunk)
1338 fp.write(chunk)
1339
1339
1340 for seqno, rev in enumerate(revs):
1340 for seqno, rev in enumerate(revs):
1341 single(rev, seqno+1, fp)
1341 single(rev, seqno+1, fp)
1342
1342
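A hypothetical usage sketch, again assuming an open repo: export revisions 3 and 4 to one patch file per revision, letting the default template expand %h to each short hash.

    # writes hg-<short hash of rev 3>.patch and hg-<short hash of rev 4>.patch
    export(repo, [3, 4], template='hg-%h.patch', opts=mdiff.defaultopts)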
1343 def diffstatdata(lines):
1343 def diffstatdata(lines):
1344 filename, adds, removes = None, 0, 0
1344 filename, adds, removes = None, 0, 0
1345 for line in lines:
1345 for line in lines:
1346 if line.startswith('diff'):
1346 if line.startswith('diff'):
1347 if filename:
1347 if filename:
1348 yield (filename, adds, removes)
1348 yield (filename, adds, removes)
1349 # reset the counts to 0 when starting a new file
1349 # reset the counts to 0 when starting a new file
1350 adds, removes = 0, 0
1350 adds, removes = 0, 0
1351 if line.startswith('diff --git'):
1351 if line.startswith('diff --git'):
1352 filename = gitre.search(line).group(1)
1352 filename = gitre.search(line).group(1)
1353 else:
1353 else:
1354 # format: "diff -r ... -r ... file name"
1354 # format: "diff -r ... -r ... file name"
1355 filename = line.split(None, 5)[-1]
1355 filename = line.split(None, 5)[-1]
1356 elif line.startswith('+') and not line.startswith('+++'):
1356 elif line.startswith('+') and not line.startswith('+++'):
1357 adds += 1
1357 adds += 1
1358 elif line.startswith('-') and not line.startswith('---'):
1358 elif line.startswith('-') and not line.startswith('---'):
1359 removes += 1
1359 removes += 1
1360 if filename:
1360 if filename:
1361 yield (filename, adds, removes)
1361 yield (filename, adds, removes)
1362
1362
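A minimal sketch feeding diffstatdata a small unified diff (assuming the function above is in scope); the generator yields one (filename, adds, removes) tuple per file.

    lines = '''diff -r aaa -r bbb foo.py
    --- a/foo.py
    +++ b/foo.py
    @@ -1,2 +1,2 @@
    -old line
    +new line
    '''.splitlines()

    assert list(diffstatdata(lines)) == [('foo.py', 1, 1)]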
1363 def diffstat(lines, width=80):
1363 def diffstat(lines, width=80):
1364 output = []
1364 output = []
1365 stats = list(diffstatdata(lines))
1365 stats = list(diffstatdata(lines))
1366
1366
1367 maxtotal, maxname = 0, 0
1367 maxtotal, maxname = 0, 0
1368 totaladds, totalremoves = 0, 0
1368 totaladds, totalremoves = 0, 0
1369 for filename, adds, removes in stats:
1369 for filename, adds, removes in stats:
1370 totaladds += adds
1370 totaladds += adds
1371 totalremoves += removes
1371 totalremoves += removes
1372 maxname = max(maxname, len(filename))
1372 maxname = max(maxname, len(filename))
1373 maxtotal = max(maxtotal, adds+removes)
1373 maxtotal = max(maxtotal, adds+removes)
1374
1374
1375 countwidth = len(str(maxtotal))
1375 countwidth = len(str(maxtotal))
1376 graphwidth = width - countwidth - maxname
1376 graphwidth = width - countwidth - maxname
1377 if graphwidth < 10:
1377 if graphwidth < 10:
1378 graphwidth = 10
1378 graphwidth = 10
1379
1379
1380 factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1)
1380 factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1)
1381
1381
1382 for filename, adds, removes in stats:
1382 for filename, adds, removes in stats:
1383 # If diffstat runs out of room it doesn't print anything, which
1383 # If diffstat runs out of room it doesn't print anything, which
1384 # isn't very useful, so always print at least one + or - if there
1384 # isn't very useful, so always print at least one + or - if there
1385 # were at least some changes
1385 # were at least some changes
1386 pluses = '+' * max(adds/factor, int(bool(adds)))
1386 pluses = '+' * max(adds/factor, int(bool(adds)))
1387 minuses = '-' * max(removes/factor, int(bool(removes)))
1387 minuses = '-' * max(removes/factor, int(bool(removes)))
1388 output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth,
1388 output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth,
1389 adds+removes, pluses, minuses))
1389 adds+removes, pluses, minuses))
1390
1390
1391 if stats:
1391 if stats:
1392 output.append(' %d files changed, %d insertions(+), %d deletions(-)\n'
1392 output.append(' %d files changed, %d insertions(+), %d deletions(-)\n'
1393 % (len(stats), totaladds, totalremoves))
1393 % (len(stats), totaladds, totalremoves))
1394
1394
1395 return ''.join(output)
1395 return ''.join(output)
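The scaling above deserves a worked example: each '+' or '-' in the histogram stands for `factor` changes, where factor is the busiest file's total divided by the available graph columns, rounded up. With width=80, a 20-character longest filename, and a busiest file touching 300 lines:

    import math

    width, maxname, maxtotal = 80, 20, 300
    countwidth = len(str(maxtotal))                    # 3 columns for counts
    graphwidth = max(width - countwidth - maxname, 10) # 57 columns remain
    factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1)
    assert (countwidth, graphwidth, factor) == (3, 57, 6)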
@@ -1,1389 +1,1389 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import changegroup, ancestor, mdiff, parsers, error, util
17 import changegroup, ancestor, mdiff, parsers, error, util
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog flags
26 # revlog flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33
33
34 _prereadsize = 1048576
34 _prereadsize = 1048576
35
35
36 RevlogError = error.RevlogError
36 RevlogError = error.RevlogError
37 LookupError = error.LookupError
37 LookupError = error.LookupError
38
38
39 def getoffset(q):
39 def getoffset(q):
40 return int(q >> 16)
40 return int(q >> 16)
41
41
42 def gettype(q):
42 def gettype(q):
43 return int(q & 0xFFFF)
43 return int(q & 0xFFFF)
44
44
45 def offset_type(offset, type):
45 def offset_type(offset, type):
46 return long(long(offset) << 16 | type)
46 return long(long(offset) << 16 | type)
47
47
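A quick round-trip check of the three helpers above: the packed field keeps the type flags in the low 16 bits and the data-file offset in the bits above them.

    packed = offset_type(12345, 1)
    assert packed == (12345 << 16) | 1
    assert getoffset(packed) == 12345
    assert gettype(packed) == 1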
48 nullhash = _sha(nullid)
48 nullhash = _sha(nullid)
49
49
50 def hash(text, p1, p2):
50 def hash(text, p1, p2):
51 """generate a hash from the given text and its parent hashes
51 """generate a hash from the given text and its parent hashes
52
52
53 This hash combines both the current file contents and its history
53 This hash combines both the current file contents and its history
54 in a manner that makes it easy to distinguish nodes with the same
54 in a manner that makes it easy to distinguish nodes with the same
55 content in the revision graph.
55 content in the revision graph.
56 """
56 """
57 # As of now, if one of the parent nodes is null, p2 is null
57 # As of now, if one of the parent nodes is null, p2 is null
58 if p2 == nullid:
58 if p2 == nullid:
59 # deep copy of a hash is faster than creating one
59 # deep copy of a hash is faster than creating one
60 s = nullhash.copy()
60 s = nullhash.copy()
61 s.update(p1)
61 s.update(p1)
62 else:
62 else:
63 # none of the parent nodes are nullid
63 # none of the parent nodes are nullid
64 l = [p1, p2]
64 l = [p1, p2]
65 l.sort()
65 l.sort()
66 s = _sha(l[0])
66 s = _sha(l[0])
67 s.update(l[1])
67 s.update(l[1])
68 s.update(text)
68 s.update(text)
69 return s.digest()
69 return s.digest()
70
70
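A sketch of the scheme above in plain hashlib terms, assuming it runs where this module's hash() is in scope (note that it shadows the builtin here): for two non-null parents, the node id is SHA-1 over the sorted parent ids followed by the text. The parent values below are hypothetical 20-byte ids.

    import hashlib

    p1, p2 = 'a' * 20, 'b' * 20          # hypothetical parent node ids
    text = 'file contents\n'
    lo, hi = sorted([p1, p2])
    assert hash(text, p1, p2) == hashlib.sha1(lo + hi + text).digest()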
71 def compress(text):
71 def compress(text):
72 """ generate a possibly-compressed representation of text """
72 """ generate a possibly-compressed representation of text """
73 if not text:
73 if not text:
74 return ("", text)
74 return ("", text)
75 l = len(text)
75 l = len(text)
76 bin = None
76 bin = None
77 if l < 44:
77 if l < 44:
78 pass
78 pass
79 elif l > 1000000:
79 elif l > 1000000:
80 # zlib makes an internal copy, thus doubling memory usage for
80 # zlib makes an internal copy, thus doubling memory usage for
81 # large files, so let's do this in pieces
81 # large files, so let's do this in pieces
82 z = zlib.compressobj()
82 z = zlib.compressobj()
83 p = []
83 p = []
84 pos = 0
84 pos = 0
85 while pos < l:
85 while pos < l:
86 pos2 = pos + 2**20
86 pos2 = pos + 2**20
87 p.append(z.compress(text[pos:pos2]))
87 p.append(z.compress(text[pos:pos2]))
88 pos = pos2
88 pos = pos2
89 p.append(z.flush())
89 p.append(z.flush())
90 if sum(map(len, p)) < l:
90 if sum(map(len, p)) < l:
91 bin = "".join(p)
91 bin = "".join(p)
92 else:
92 else:
93 bin = _compress(text)
93 bin = _compress(text)
94 if bin is None or len(bin) > l:
94 if bin is None or len(bin) > l:
95 if text[0] == '\0':
95 if text[0] == '\0':
96 return ("", text)
96 return ("", text)
97 return ('u', text)
97 return ('u', text)
98 return ("", bin)
98 return ("", bin)
99
99
100 def decompress(bin):
100 def decompress(bin):
101 """ decompress the given input """
101 """ decompress the given input """
102 if not bin:
102 if not bin:
103 return bin
103 return bin
104 t = bin[0]
104 t = bin[0]
105 if t == '\0':
105 if t == '\0':
106 return bin
106 return bin
107 if t == 'x':
107 if t == 'x':
108 return _decompress(bin)
108 return _decompress(bin)
109 if t == 'u':
109 if t == 'u':
110 return bin[1:]
110 return bin[1:]
111 raise RevlogError(_("unknown compression type %r") % t)
111 raise RevlogError(_("unknown compression type %r") % t)
112
112
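A round-trip sketch for the pair above: compress may return the text unchanged behind a 'u' marker (too short, or incompressible) or a zlib stream, and decompress dispatches on the first byte either way.

    text = 'x' * 1000                    # long and compressible
    prefix, data = compress(text)
    assert prefix == '' and len(data) < len(text)
    assert decompress(prefix + data) == text

    short = 'tiny'                       # under 44 bytes: stored as-is
    assert compress(short) == ('u', short)
    assert decompress('u' + short) == short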
113 class lazyparser(object):
113 class lazyparser(object):
114 """
114 """
115 this class avoids the need to parse the entirety of large indices
115 this class avoids the need to parse the entirety of large indices
116 """
116 """
117
117
118 # lazyparser is not safe to use on windows if the win32 extensions
118 # lazyparser is not safe to use on windows if the win32 extensions
119 # are not available: it keeps a file handle open, which makes it
119 # are not available: it keeps a file handle open, which makes it
120 # impossible to break hardlinks on locally cloned repos.
120 # impossible to break hardlinks on locally cloned repos.
121
121
122 def __init__(self, dataf, size):
122 def __init__(self, dataf, size):
123 self.dataf = dataf
123 self.dataf = dataf
124 self.s = struct.calcsize(indexformatng)
124 self.s = struct.calcsize(indexformatng)
125 self.datasize = size
125 self.datasize = size
126 self.l = size/self.s
126 self.l = size/self.s
127 self.index = [None] * self.l
127 self.index = [None] * self.l
128 self.map = {nullid: nullrev}
128 self.map = {nullid: nullrev}
129 self.allmap = 0
129 self.allmap = 0
130 self.all = 0
130 self.all = 0
131 self.mapfind_count = 0
131 self.mapfind_count = 0
132
132
133 def loadmap(self):
133 def loadmap(self):
134 """
134 """
135 during a commit, we need to make sure the rev being added is
135 during a commit, we need to make sure the rev being added is
136 not a duplicate. This requires loading the entire index,
136 not a duplicate. This requires loading the entire index,
137 which is fairly slow. loadmap can load up just the node map,
137 which is fairly slow. loadmap can load up just the node map,
138 which takes much less time.
138 which takes much less time.
139 """
139 """
140 if self.allmap:
140 if self.allmap:
141 return
141 return
142 end = self.datasize
142 end = self.datasize
143 self.allmap = 1
143 self.allmap = 1
144 cur = 0
144 cur = 0
145 count = 0
145 count = 0
146 blocksize = self.s * 256
146 blocksize = self.s * 256
147 self.dataf.seek(0)
147 self.dataf.seek(0)
148 while cur < end:
148 while cur < end:
149 data = self.dataf.read(blocksize)
149 data = self.dataf.read(blocksize)
150 off = 0
150 off = 0
151 for x in xrange(256):
151 for x in xrange(256):
152 n = data[off + ngshaoffset:off + ngshaoffset + 20]
152 n = data[off + ngshaoffset:off + ngshaoffset + 20]
153 self.map[n] = count
153 self.map[n] = count
154 count += 1
154 count += 1
155 if count >= self.l:
155 if count >= self.l:
156 break
156 break
157 off += self.s
157 off += self.s
158 cur += blocksize
158 cur += blocksize
159
159
160 def loadblock(self, blockstart, blocksize, data=None):
160 def loadblock(self, blockstart, blocksize, data=None):
161 if self.all:
161 if self.all:
162 return
162 return
163 if data is None:
163 if data is None:
164 self.dataf.seek(blockstart)
164 self.dataf.seek(blockstart)
165 if blockstart + blocksize > self.datasize:
165 if blockstart + blocksize > self.datasize:
166 # the revlog may have grown since we started running,
166 # the revlog may have grown since we started running,
167 # but we don't have space in self.index for more entries.
167 # but we don't have space in self.index for more entries.
168 # limit blocksize so that we don't get too much data.
168 # limit blocksize so that we don't get too much data.
169 blocksize = max(self.datasize - blockstart, 0)
169 blocksize = max(self.datasize - blockstart, 0)
170 data = self.dataf.read(blocksize)
170 data = self.dataf.read(blocksize)
171 lend = len(data) / self.s
171 lend = len(data) / self.s
172 i = blockstart / self.s
172 i = blockstart / self.s
173 off = 0
173 off = 0
174 # lazyindex supports __delitem__
174 # lazyindex supports __delitem__
175 if lend > len(self.index) - i:
175 if lend > len(self.index) - i:
176 lend = len(self.index) - i
176 lend = len(self.index) - i
177 for x in xrange(lend):
177 for x in xrange(lend):
178 if self.index[i + x] == None:
178 if self.index[i + x] is None:
179 b = data[off : off + self.s]
179 b = data[off : off + self.s]
180 self.index[i + x] = b
180 self.index[i + x] = b
181 n = b[ngshaoffset:ngshaoffset + 20]
181 n = b[ngshaoffset:ngshaoffset + 20]
182 self.map[n] = i + x
182 self.map[n] = i + x
183 off += self.s
183 off += self.s
184
184
185 def findnode(self, node):
185 def findnode(self, node):
186 """search backwards through the index file for a specific node"""
186 """search backwards through the index file for a specific node"""
187 if self.allmap:
187 if self.allmap:
188 return None
188 return None
189
189
190 # hg log will cause many many searches for the manifest
190 # hg log will cause many many searches for the manifest
191 # nodes. After we get called a few times, just load the whole
191 # nodes. After we get called a few times, just load the whole
192 # thing.
192 # thing.
193 if self.mapfind_count > 8:
193 if self.mapfind_count > 8:
194 self.loadmap()
194 self.loadmap()
195 if node in self.map:
195 if node in self.map:
196 return node
196 return node
197 return None
197 return None
198 self.mapfind_count += 1
198 self.mapfind_count += 1
199 last = self.l - 1
199 last = self.l - 1
200 while self.index[last] != None:
200 while self.index[last] != None:
201 if last == 0:
201 if last == 0:
202 self.all = 1
202 self.all = 1
203 self.allmap = 1
203 self.allmap = 1
204 return None
204 return None
205 last -= 1
205 last -= 1
206 end = (last + 1) * self.s
206 end = (last + 1) * self.s
207 blocksize = self.s * 256
207 blocksize = self.s * 256
208 while end >= 0:
208 while end >= 0:
209 start = max(end - blocksize, 0)
209 start = max(end - blocksize, 0)
210 self.dataf.seek(start)
210 self.dataf.seek(start)
211 data = self.dataf.read(end - start)
211 data = self.dataf.read(end - start)
212 findend = end - start
212 findend = end - start
213 while True:
213 while True:
214 # we're searching backwards, so we have to make sure
214 # we're searching backwards, so we have to make sure
215 # we don't find a changeset where this node is a parent
215 # we don't find a changeset where this node is a parent
216 off = data.find(node, 0, findend)
216 off = data.find(node, 0, findend)
217 findend = off
217 findend = off
218 if off >= 0:
218 if off >= 0:
219 i = off / self.s
219 i = off / self.s
220 off = i * self.s
220 off = i * self.s
221 n = data[off + ngshaoffset:off + ngshaoffset + 20]
221 n = data[off + ngshaoffset:off + ngshaoffset + 20]
222 if n == node:
222 if n == node:
223 self.map[n] = i + start / self.s
223 self.map[n] = i + start / self.s
224 return node
224 return node
225 else:
225 else:
226 break
226 break
227 end -= blocksize
227 end -= blocksize
228 return None
228 return None
229
229
230 def loadindex(self, i=None, end=None):
230 def loadindex(self, i=None, end=None):
231 if self.all:
231 if self.all:
232 return
232 return
233 all = False
233 all = False
234 if i == None:
234 if i is None:
235 blockstart = 0
235 blockstart = 0
236 blocksize = (65536 / self.s) * self.s
236 blocksize = (65536 / self.s) * self.s
237 end = self.datasize
237 end = self.datasize
238 all = True
238 all = True
239 else:
239 else:
240 if end:
240 if end:
241 blockstart = i * self.s
241 blockstart = i * self.s
242 end = end * self.s
242 end = end * self.s
243 blocksize = end - blockstart
243 blocksize = end - blockstart
244 else:
244 else:
245 blockstart = (i & ~1023) * self.s
245 blockstart = (i & ~1023) * self.s
246 blocksize = self.s * 1024
246 blocksize = self.s * 1024
247 end = blockstart + blocksize
247 end = blockstart + blocksize
248 while blockstart < end:
248 while blockstart < end:
249 self.loadblock(blockstart, blocksize)
249 self.loadblock(blockstart, blocksize)
250 blockstart += blocksize
250 blockstart += blocksize
251 if all:
251 if all:
252 self.all = True
252 self.all = True
253
253
254 class lazyindex(object):
254 class lazyindex(object):
255 """a lazy version of the index array"""
255 """a lazy version of the index array"""
256 def __init__(self, parser):
256 def __init__(self, parser):
257 self.p = parser
257 self.p = parser
258 def __len__(self):
258 def __len__(self):
259 return len(self.p.index)
259 return len(self.p.index)
260 def load(self, pos):
260 def load(self, pos):
261 if pos < 0:
261 if pos < 0:
262 pos += len(self.p.index)
262 pos += len(self.p.index)
263 self.p.loadindex(pos)
263 self.p.loadindex(pos)
264 return self.p.index[pos]
264 return self.p.index[pos]
265 def __getitem__(self, pos):
265 def __getitem__(self, pos):
266 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
266 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
267 def __setitem__(self, pos, item):
267 def __setitem__(self, pos, item):
268 self.p.index[pos] = _pack(indexformatng, *item)
268 self.p.index[pos] = _pack(indexformatng, *item)
269 def __delitem__(self, pos):
269 def __delitem__(self, pos):
270 del self.p.index[pos]
270 del self.p.index[pos]
271 def insert(self, pos, e):
271 def insert(self, pos, e):
272 self.p.index.insert(pos, _pack(indexformatng, *e))
272 self.p.index.insert(pos, _pack(indexformatng, *e))
273 def append(self, e):
273 def append(self, e):
274 self.p.index.append(_pack(indexformatng, *e))
274 self.p.index.append(_pack(indexformatng, *e))
275
275
276 class lazymap(object):
276 class lazymap(object):
277 """a lazy version of the node map"""
277 """a lazy version of the node map"""
278 def __init__(self, parser):
278 def __init__(self, parser):
279 self.p = parser
279 self.p = parser
280 def load(self, key):
280 def load(self, key):
281 n = self.p.findnode(key)
281 n = self.p.findnode(key)
282 if n == None:
282 if n is None:
283 raise KeyError(key)
283 raise KeyError(key)
284 def __contains__(self, key):
284 def __contains__(self, key):
285 if key in self.p.map:
285 if key in self.p.map:
286 return True
286 return True
287 self.p.loadmap()
287 self.p.loadmap()
288 return key in self.p.map
288 return key in self.p.map
289 def __iter__(self):
289 def __iter__(self):
290 yield nullid
290 yield nullid
291 for i in xrange(self.p.l):
291 for i in xrange(self.p.l):
292 ret = self.p.index[i]
292 ret = self.p.index[i]
293 if not ret:
293 if not ret:
294 self.p.loadindex(i)
294 self.p.loadindex(i)
295 ret = self.p.index[i]
295 ret = self.p.index[i]
296 if isinstance(ret, str):
296 if isinstance(ret, str):
297 ret = _unpack(indexformatng, ret)
297 ret = _unpack(indexformatng, ret)
298 yield ret[7]
298 yield ret[7]
299 def __getitem__(self, key):
299 def __getitem__(self, key):
300 try:
300 try:
301 return self.p.map[key]
301 return self.p.map[key]
302 except KeyError:
302 except KeyError:
303 try:
303 try:
304 self.load(key)
304 self.load(key)
305 return self.p.map[key]
305 return self.p.map[key]
306 except KeyError:
306 except KeyError:
307 raise KeyError("node " + hex(key))
307 raise KeyError("node " + hex(key))
308 def __setitem__(self, key, val):
308 def __setitem__(self, key, val):
309 self.p.map[key] = val
309 self.p.map[key] = val
310 def __delitem__(self, key):
310 def __delitem__(self, key):
311 del self.p.map[key]
311 del self.p.map[key]
312
312
313 indexformatv0 = ">4l20s20s20s"
313 indexformatv0 = ">4l20s20s20s"
314 v0shaoffset = 56
314 v0shaoffset = 56
315
315
316 class revlogoldio(object):
316 class revlogoldio(object):
317 def __init__(self):
317 def __init__(self):
318 self.size = struct.calcsize(indexformatv0)
318 self.size = struct.calcsize(indexformatv0)
319
319
320 def parseindex(self, fp, data, inline):
320 def parseindex(self, fp, data, inline):
321 s = self.size
321 s = self.size
322 index = []
322 index = []
323 nodemap = {nullid: nullrev}
323 nodemap = {nullid: nullrev}
324 n = off = 0
324 n = off = 0
325 if len(data) < _prereadsize:
325 if len(data) < _prereadsize:
326 data += fp.read() # read the rest
326 data += fp.read() # read the rest
327 l = len(data)
327 l = len(data)
328 while off + s <= l:
328 while off + s <= l:
329 cur = data[off:off + s]
329 cur = data[off:off + s]
330 off += s
330 off += s
331 e = _unpack(indexformatv0, cur)
331 e = _unpack(indexformatv0, cur)
332 # transform to revlogv1 format
332 # transform to revlogv1 format
333 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
333 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
334 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
334 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
335 index.append(e2)
335 index.append(e2)
336 nodemap[e[6]] = n
336 nodemap[e[6]] = n
337 n += 1
337 n += 1
338
338
339 return index, nodemap, None
339 return index, nodemap, None
340
340
341 def packentry(self, entry, node, version, rev):
341 def packentry(self, entry, node, version, rev):
342 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
342 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
343 node(entry[5]), node(entry[6]), entry[7])
343 node(entry[5]), node(entry[6]), entry[7])
344 return _pack(indexformatv0, *e2)
344 return _pack(indexformatv0, *e2)
345
345
346 # index ng:
346 # index ng:
347 # 6 bytes offset
347 # 6 bytes offset
348 # 2 bytes flags
348 # 2 bytes flags
349 # 4 bytes compressed length
349 # 4 bytes compressed length
350 # 4 bytes uncompressed length
350 # 4 bytes uncompressed length
351 # 4 bytes: base rev
351 # 4 bytes: base rev
352 # 4 bytes link rev
352 # 4 bytes link rev
353 # 4 bytes parent 1 rev
353 # 4 bytes parent 1 rev
354 # 4 bytes parent 2 rev
354 # 4 bytes parent 2 rev
355 # 32 bytes: nodeid
355 # 32 bytes: nodeid
356 indexformatng = ">Qiiiiii20s12x"
356 indexformatng = ">Qiiiiii20s12x"
357 ngshaoffset = 32
357 ngshaoffset = 32
358 versionformat = ">I"
358 versionformat = ">I"
359
359
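The format string above matches the byte layout in the comment: the offset and flags share one big-endian quad, six 4-byte ints follow, then the 20-byte nodeid and 12 bytes of padding, 64 bytes per record. A pack/unpack round trip with an illustrative entry:

    import struct

    assert struct.calcsize(">Qiiiiii20s12x") == 64   # 8 + 6*4 + 20 + 12

    entry = (0, 30, 120, 0, 2, -1, -1, '\x00' * 20)
    packed = struct.pack(">Qiiiiii20s12x", *entry)
    assert struct.unpack(">Qiiiiii20s12x", packed) == entry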
360 class revlogio(object):
360 class revlogio(object):
361 def __init__(self):
361 def __init__(self):
362 self.size = struct.calcsize(indexformatng)
362 self.size = struct.calcsize(indexformatng)
363
363
364 def parseindex(self, fp, data, inline):
364 def parseindex(self, fp, data, inline):
365 try:
365 try:
366 size = len(data)
366 size = len(data)
367 if size == _prereadsize:
367 if size == _prereadsize:
368 size = util.fstat(fp).st_size
368 size = util.fstat(fp).st_size
369 except AttributeError:
369 except AttributeError:
370 size = 0
370 size = 0
371
371
372 if util.openhardlinks() and not inline and size > _prereadsize:
372 if util.openhardlinks() and not inline and size > _prereadsize:
373 # big index, let's parse it on demand
373 # big index, let's parse it on demand
374 parser = lazyparser(fp, size)
374 parser = lazyparser(fp, size)
375 index = lazyindex(parser)
375 index = lazyindex(parser)
376 nodemap = lazymap(parser)
376 nodemap = lazymap(parser)
377 e = list(index[0])
377 e = list(index[0])
378 type = gettype(e[0])
378 type = gettype(e[0])
379 e[0] = offset_type(0, type)
379 e[0] = offset_type(0, type)
380 index[0] = e
380 index[0] = e
381 return index, nodemap, None
381 return index, nodemap, None
382
382
383 # call the C implementation to parse the index data
383 # call the C implementation to parse the index data
384 index, nodemap, cache = parsers.parse_index(data, inline)
384 index, nodemap, cache = parsers.parse_index(data, inline)
385 return index, nodemap, cache
385 return index, nodemap, cache
386
386
387 def packentry(self, entry, node, version, rev):
387 def packentry(self, entry, node, version, rev):
388 p = _pack(indexformatng, *entry)
388 p = _pack(indexformatng, *entry)
389 if rev == 0:
389 if rev == 0:
390 p = _pack(versionformat, version) + p[4:]
390 p = _pack(versionformat, version) + p[4:]
391 return p
391 return p
392
392
393 class revlog(object):
393 class revlog(object):
394 """
394 """
395 the underlying revision storage object
395 the underlying revision storage object
396
396
397 A revlog consists of two parts, an index and the revision data.
397 A revlog consists of two parts, an index and the revision data.
398
398
399 The index is a file with a fixed record size containing
399 The index is a file with a fixed record size containing
400 information on each revision, including its nodeid (hash), the
400 information on each revision, including its nodeid (hash), the
401 nodeids of its parents, the position and offset of its data within
401 nodeids of its parents, the position and offset of its data within
402 the data file, and the revision it's based on. Finally, each entry
402 the data file, and the revision it's based on. Finally, each entry
403 contains a linkrev entry that can serve as a pointer to external
403 contains a linkrev entry that can serve as a pointer to external
404 data.
404 data.
405
405
406 The revision data itself is a linear collection of data chunks.
406 The revision data itself is a linear collection of data chunks.
407 Each chunk represents a revision and is usually represented as a
407 Each chunk represents a revision and is usually represented as a
408 delta against the previous chunk. To bound lookup time, runs of
408 delta against the previous chunk. To bound lookup time, runs of
409 deltas are limited to about 2 times the length of the original
409 deltas are limited to about 2 times the length of the original
410 version data. This makes retrieval of a version proportional to
410 version data. This makes retrieval of a version proportional to
411 its size, or O(1) relative to the number of revisions.
411 its size, or O(1) relative to the number of revisions.
412
412
413 Both pieces of the revlog are written to in an append-only
413 Both pieces of the revlog are written to in an append-only
414 fashion, which means we never need to rewrite a file to insert or
414 fashion, which means we never need to rewrite a file to insert or
415 remove data, and can use some simple techniques to avoid the need
415 remove data, and can use some simple techniques to avoid the need
416 for locking while reading.
416 for locking while reading.
417 """
417 """
418 def __init__(self, opener, indexfile):
418 def __init__(self, opener, indexfile):
419 """
419 """
420 create a revlog object
420 create a revlog object
421
421
422 opener is a function that abstracts the file opening operation
422 opener is a function that abstracts the file opening operation
423 and can be used to implement COW semantics or the like.
423 and can be used to implement COW semantics or the like.
424 """
424 """
425 self.indexfile = indexfile
425 self.indexfile = indexfile
426 self.datafile = indexfile[:-2] + ".d"
426 self.datafile = indexfile[:-2] + ".d"
427 self.opener = opener
427 self.opener = opener
428 self._cache = None
428 self._cache = None
429 self._chunkcache = (0, '')
429 self._chunkcache = (0, '')
430 self.nodemap = {nullid: nullrev}
430 self.nodemap = {nullid: nullrev}
431 self.index = []
431 self.index = []
432
432
433 v = REVLOG_DEFAULT_VERSION
433 v = REVLOG_DEFAULT_VERSION
434 if hasattr(opener, "defversion"):
434 if hasattr(opener, "defversion"):
435 v = opener.defversion
435 v = opener.defversion
436 if v & REVLOGNG:
436 if v & REVLOGNG:
437 v |= REVLOGNGINLINEDATA
437 v |= REVLOGNGINLINEDATA
438
438
439 i = ''
439 i = ''
440 try:
440 try:
441 f = self.opener(self.indexfile)
441 f = self.opener(self.indexfile)
442 i = f.read(_prereadsize)
442 i = f.read(_prereadsize)
443 if len(i) > 0:
443 if len(i) > 0:
444 v = struct.unpack(versionformat, i[:4])[0]
444 v = struct.unpack(versionformat, i[:4])[0]
445 except IOError, inst:
445 except IOError, inst:
446 if inst.errno != errno.ENOENT:
446 if inst.errno != errno.ENOENT:
447 raise
447 raise
448
448
449 self.version = v
449 self.version = v
450 self._inline = v & REVLOGNGINLINEDATA
450 self._inline = v & REVLOGNGINLINEDATA
451 flags = v & ~0xFFFF
451 flags = v & ~0xFFFF
452 fmt = v & 0xFFFF
452 fmt = v & 0xFFFF
453 if fmt == REVLOGV0 and flags:
453 if fmt == REVLOGV0 and flags:
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 % (self.indexfile, flags >> 16))
455 % (self.indexfile, flags >> 16))
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 % (self.indexfile, flags >> 16))
458 % (self.indexfile, flags >> 16))
459 elif fmt > REVLOGNG:
459 elif fmt > REVLOGNG:
460 raise RevlogError(_("index %s unknown format %d")
460 raise RevlogError(_("index %s unknown format %d")
461 % (self.indexfile, fmt))
461 % (self.indexfile, fmt))
462
462
463 self._io = revlogio()
463 self._io = revlogio()
464 if self.version == REVLOGV0:
464 if self.version == REVLOGV0:
465 self._io = revlogoldio()
465 self._io = revlogoldio()
466 if i:
466 if i:
467 try:
467 try:
468 d = self._io.parseindex(f, i, self._inline)
468 d = self._io.parseindex(f, i, self._inline)
469 except (ValueError, IndexError), e:
469 except (ValueError, IndexError), e:
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 self.index, self.nodemap, self._chunkcache = d
471 self.index, self.nodemap, self._chunkcache = d
472 if not self._chunkcache:
472 if not self._chunkcache:
473 self._chunkcache = (0, '')
473 self._chunkcache = (0, '')
474
474
475 # add the magic null revision at -1 (if it hasn't been done already)
475 # add the magic null revision at -1 (if it hasn't been done already)
476 if (self.index == [] or isinstance(self.index, lazyindex) or
476 if (self.index == [] or isinstance(self.index, lazyindex) or
477 self.index[-1][7] != nullid) :
477 self.index[-1][7] != nullid) :
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479
479
480 def _loadindex(self, start, end):
480 def _loadindex(self, start, end):
481 """load a block of indexes all at once from the lazy parser"""
481 """load a block of indexes all at once from the lazy parser"""
482 if isinstance(self.index, lazyindex):
482 if isinstance(self.index, lazyindex):
483 self.index.p.loadindex(start, end)
483 self.index.p.loadindex(start, end)
484
484
485 def _loadindexmap(self):
485 def _loadindexmap(self):
486 """loads both the map and the index from the lazy parser"""
486 """loads both the map and the index from the lazy parser"""
487 if isinstance(self.index, lazyindex):
487 if isinstance(self.index, lazyindex):
488 p = self.index.p
488 p = self.index.p
489 p.loadindex()
489 p.loadindex()
490 self.nodemap = p.map
490 self.nodemap = p.map
491
491
492 def _loadmap(self):
492 def _loadmap(self):
493 """loads the map from the lazy parser"""
493 """loads the map from the lazy parser"""
494 if isinstance(self.nodemap, lazymap):
494 if isinstance(self.nodemap, lazymap):
495 self.nodemap.p.loadmap()
495 self.nodemap.p.loadmap()
496 self.nodemap = self.nodemap.p.map
496 self.nodemap = self.nodemap.p.map
497
497
498 def tip(self):
498 def tip(self):
499 return self.node(len(self.index) - 2)
499 return self.node(len(self.index) - 2)
500 def __len__(self):
500 def __len__(self):
501 return len(self.index) - 1
501 return len(self.index) - 1
502 def __iter__(self):
502 def __iter__(self):
503 for i in xrange(len(self)):
503 for i in xrange(len(self)):
504 yield i
504 yield i
505 def rev(self, node):
505 def rev(self, node):
506 try:
506 try:
507 return self.nodemap[node]
507 return self.nodemap[node]
508 except KeyError:
508 except KeyError:
509 raise LookupError(node, self.indexfile, _('no node'))
509 raise LookupError(node, self.indexfile, _('no node'))
510 def node(self, rev):
510 def node(self, rev):
511 return self.index[rev][7]
511 return self.index[rev][7]
512 def linkrev(self, rev):
512 def linkrev(self, rev):
513 return self.index[rev][4]
513 return self.index[rev][4]
514 def parents(self, node):
514 def parents(self, node):
515 i = self.index
515 i = self.index
516 d = i[self.rev(node)]
516 d = i[self.rev(node)]
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 def parentrevs(self, rev):
518 def parentrevs(self, rev):
519 return self.index[rev][5:7]
519 return self.index[rev][5:7]
520 def start(self, rev):
520 def start(self, rev):
521 return int(self.index[rev][0] >> 16)
521 return int(self.index[rev][0] >> 16)
522 def end(self, rev):
522 def end(self, rev):
523 return self.start(rev) + self.length(rev)
523 return self.start(rev) + self.length(rev)
524 def length(self, rev):
524 def length(self, rev):
525 return self.index[rev][1]
525 return self.index[rev][1]
526 def base(self, rev):
526 def base(self, rev):
527 return self.index[rev][3]
527 return self.index[rev][3]
528
528
529 def size(self, rev):
529 def size(self, rev):
530 """return the length of the uncompressed text for a given revision"""
530 """return the length of the uncompressed text for a given revision"""
531 l = self.index[rev][2]
531 l = self.index[rev][2]
532 if l >= 0:
532 if l >= 0:
533 return l
533 return l
534
534
535 t = self.revision(self.node(rev))
535 t = self.revision(self.node(rev))
536 return len(t)
536 return len(t)
537
537
538 # alternate implementation. The advantage of this code is that it
538 # alternate implementation. The advantage of this code is that it
539 # will be faster for a single revision, but the results are not
539 # will be faster for a single revision, but the results are not
540 # cached, so finding the size of every revision will be slower.
540 # cached, so finding the size of every revision will be slower.
541 """
541 """
542 if self.cache and self.cache[1] == rev:
542 if self.cache and self.cache[1] == rev:
543 return len(self.cache[2])
543 return len(self.cache[2])
544
544
545 base = self.base(rev)
545 base = self.base(rev)
546 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
546 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
547 base = self.cache[1]
547 base = self.cache[1]
548 text = self.cache[2]
548 text = self.cache[2]
549 else:
549 else:
550 text = self.revision(self.node(base))
550 text = self.revision(self.node(base))
551
551
552 l = len(text)
552 l = len(text)
553 for x in xrange(base + 1, rev + 1):
553 for x in xrange(base + 1, rev + 1):
554 l = mdiff.patchedsize(l, self.chunk(x))
554 l = mdiff.patchedsize(l, self.chunk(x))
555 return l
555 return l
556 """
556 """
557
557
558 def reachable(self, node, stop=None):
558 def reachable(self, node, stop=None):
559 """return the set of all nodes ancestral to a given node, including
559 """return the set of all nodes ancestral to a given node, including
560 the node itself, stopping when stop is matched"""
560 the node itself, stopping when stop is matched"""
561 reachable = set((node,))
561 reachable = set((node,))
562 visit = [node]
562 visit = [node]
563 if stop:
563 if stop:
564 stopn = self.rev(stop)
564 stopn = self.rev(stop)
565 else:
565 else:
566 stopn = 0
566 stopn = 0
567 while visit:
567 while visit:
568 n = visit.pop(0)
568 n = visit.pop(0)
569 if n == stop:
569 if n == stop:
570 continue
570 continue
571 if n == nullid:
571 if n == nullid:
572 continue
572 continue
573 for p in self.parents(n):
573 for p in self.parents(n):
574 if self.rev(p) < stopn:
574 if self.rev(p) < stopn:
575 continue
575 continue
576 if p not in reachable:
576 if p not in reachable:
577 reachable.add(p)
577 reachable.add(p)
578 visit.append(p)
578 visit.append(p)
579 return reachable
579 return reachable
580
580
581 def ancestors(self, *revs):
581 def ancestors(self, *revs):
582 'Generate the ancestors of revs using a breadth-first visit'
582 'Generate the ancestors of revs using a breadth-first visit'
583 visit = list(revs)
583 visit = list(revs)
584 seen = set([nullrev])
584 seen = set([nullrev])
585 while visit:
585 while visit:
586 for parent in self.parentrevs(visit.pop(0)):
586 for parent in self.parentrevs(visit.pop(0)):
587 if parent not in seen:
587 if parent not in seen:
588 visit.append(parent)
588 visit.append(parent)
589 seen.add(parent)
589 seen.add(parent)
590 yield parent
590 yield parent
591
591
592 def descendants(self, *revs):
592 def descendants(self, *revs):
593 'Generate the descendants of revs in topological order'
593 'Generate the descendants of revs in topological order'
594 seen = set(revs)
594 seen = set(revs)
595 for i in xrange(min(revs) + 1, len(self)):
595 for i in xrange(min(revs) + 1, len(self)):
596 for x in self.parentrevs(i):
596 for x in self.parentrevs(i):
597 if x != nullrev and x in seen:
597 if x != nullrev and x in seen:
598 seen.add(i)
598 seen.add(i)
599 yield i
599 yield i
600 break
600 break
601
601
602 def findmissing(self, common=None, heads=None):
602 def findmissing(self, common=None, heads=None):
603 '''
603 '''
604 returns the topologically sorted list of nodes from the set:
604 returns the topologically sorted list of nodes from the set:
605 missing = (ancestors(heads) \ ancestors(common))
605 missing = (ancestors(heads) \ ancestors(common))
606
606
607 where ancestors() is the set of ancestors from heads, heads included
607 where ancestors() is the set of ancestors from heads, heads included
608
608
609 if heads is None, the heads of the revlog are used
609 if heads is None, the heads of the revlog are used
610 if common is None, nullid is assumed to be a common node
610 if common is None, nullid is assumed to be a common node
611 '''
611 '''
612 if common is None:
612 if common is None:
613 common = [nullid]
613 common = [nullid]
614 if heads is None:
614 if heads is None:
615 heads = self.heads()
615 heads = self.heads()
616
616
617 common = [self.rev(n) for n in common]
617 common = [self.rev(n) for n in common]
618 heads = [self.rev(n) for n in heads]
618 heads = [self.rev(n) for n in heads]
619
619
620 # we want the ancestors, but inclusive
620 # we want the ancestors, but inclusive
621 has = set(self.ancestors(*common))
621 has = set(self.ancestors(*common))
622 has.add(nullrev)
622 has.add(nullrev)
623 has.update(common)
623 has.update(common)
624
624
625 # take all ancestors from heads that aren't in has
625 # take all ancestors from heads that aren't in has
626 missing = set()
626 missing = set()
627 visit = [r for r in heads if r not in has]
627 visit = [r for r in heads if r not in has]
628 while visit:
628 while visit:
629 r = visit.pop(0)
629 r = visit.pop(0)
630 if r in missing:
630 if r in missing:
631 continue
631 continue
632 else:
632 else:
633 missing.add(r)
633 missing.add(r)
634 for p in self.parentrevs(r):
634 for p in self.parentrevs(r):
635 if p not in has:
635 if p not in has:
636 visit.append(p)
636 visit.append(p)
637 missing = list(missing)
637 missing = list(missing)
638 missing.sort()
638 missing.sort()
639 return [self.node(r) for r in missing]
639 return [self.node(r) for r in missing]
640
640
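A toy illustration of the set expression in the docstring, with small integers standing in for nodes and hand-written inclusive ancestor sets rather than real revlog traversal: missing is whatever the heads can reach that the common set cannot.

    ancestors = {0: {0}, 1: {0, 1}, 2: {0, 1, 2}, 3: {0, 1, 3}}
    common, heads = [1], [2, 3]
    has = set().union(*[ancestors[c] for c in common])
    missing = sorted(set().union(*[ancestors[h] for h in heads]) - has)
    assert missing == [2, 3]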
641 def nodesbetween(self, roots=None, heads=None):
641 def nodesbetween(self, roots=None, heads=None):
642 """Return a tuple containing three elements. Elements 1 and 2 contain
642 """Return a tuple containing three elements. Elements 1 and 2 contain
643 a final list bases and heads after all the unreachable ones have been
643 a final list bases and heads after all the unreachable ones have been
644 pruned. Element 0 contains a topologically sorted list of all
644 pruned. Element 0 contains a topologically sorted list of all
645
645
646 nodes that satisfy these constraints:
646 nodes that satisfy these constraints:
647 1. All nodes must be descended from a node in roots (the nodes on
647 1. All nodes must be descended from a node in roots (the nodes on
648 roots are considered descended from themselves).
648 roots are considered descended from themselves).
649 2. All nodes must also be ancestors of a node in heads (the nodes in
649 2. All nodes must also be ancestors of a node in heads (the nodes in
650 heads are considered to be their own ancestors).
650 heads are considered to be their own ancestors).
651
651
652 If roots is unspecified, nullid is assumed as the only root.
652 If roots is unspecified, nullid is assumed as the only root.
653 If heads is unspecified, it is taken to be the output of the
653 If heads is unspecified, it is taken to be the output of the
654 heads method (i.e. a list of all nodes in the repository that
654 heads method (i.e. a list of all nodes in the repository that
655 have no children)."""
655 have no children)."""
656 nonodes = ([], [], [])
656 nonodes = ([], [], [])
657 if roots is not None:
657 if roots is not None:
658 roots = list(roots)
658 roots = list(roots)
659 if not roots:
659 if not roots:
660 return nonodes
660 return nonodes
661 lowestrev = min([self.rev(n) for n in roots])
661 lowestrev = min([self.rev(n) for n in roots])
662 else:
662 else:
663 roots = [nullid] # Everybody's a descendant of nullid
663 roots = [nullid] # Everybody's a descendant of nullid
664 lowestrev = nullrev
664 lowestrev = nullrev
665 if (lowestrev == nullrev) and (heads is None):
665 if (lowestrev == nullrev) and (heads is None):
666 # We want _all_ the nodes!
666 # We want _all_ the nodes!
667 return ([self.node(r) for r in self], [nullid], list(self.heads()))
667 return ([self.node(r) for r in self], [nullid], list(self.heads()))
668 if heads is None:
668 if heads is None:
669 # All nodes are ancestors, so the latest ancestor is the last
669 # All nodes are ancestors, so the latest ancestor is the last
670 # node.
670 # node.
671 highestrev = len(self) - 1
671 highestrev = len(self) - 1
672 # Set ancestors to None to signal that every node is an ancestor.
672 # Set ancestors to None to signal that every node is an ancestor.
673 ancestors = None
673 ancestors = None
674 # Set heads to an empty dictionary for later discovery of heads
674 # Set heads to an empty dictionary for later discovery of heads
675 heads = {}
675 heads = {}
676 else:
676 else:
677 heads = list(heads)
677 heads = list(heads)
678 if not heads:
678 if not heads:
679 return nonodes
679 return nonodes
680 ancestors = set()
680 ancestors = set()
681 # Turn heads into a dictionary so we can remove 'fake' heads.
681 # Turn heads into a dictionary so we can remove 'fake' heads.
682 # Also, later we will be using it to filter out the heads we can't
682 # Also, later we will be using it to filter out the heads we can't
683 # find from roots.
683 # find from roots.
684 heads = dict.fromkeys(heads, 0)
684 heads = dict.fromkeys(heads, 0)
685 # Start at the top and keep marking parents until we're done.
685 # Start at the top and keep marking parents until we're done.
686 nodestotag = set(heads)
686 nodestotag = set(heads)
687 # Remember where the top was so we can use it as a limit later.
687 # Remember where the top was so we can use it as a limit later.
688 highestrev = max([self.rev(n) for n in nodestotag])
688 highestrev = max([self.rev(n) for n in nodestotag])
689 while nodestotag:
689 while nodestotag:
690 # grab a node to tag
690 # grab a node to tag
691 n = nodestotag.pop()
691 n = nodestotag.pop()
692 # Never tag nullid
692 # Never tag nullid
693 if n == nullid:
693 if n == nullid:
694 continue
694 continue
695 # A node's revision number represents its place in a
695 # A node's revision number represents its place in a
696 # topologically sorted list of nodes.
696 # topologically sorted list of nodes.
697 r = self.rev(n)
697 r = self.rev(n)
698 if r >= lowestrev:
698 if r >= lowestrev:
699 if n not in ancestors:
699 if n not in ancestors:
700 # If we are possibly a descendant of one of the roots
700 # If we are possibly a descendant of one of the roots
701 # and we haven't already been marked as an ancestor
701 # and we haven't already been marked as an ancestor
702 ancestors.add(n) # Mark as ancestor
702 ancestors.add(n) # Mark as ancestor
703 # Add non-nullid parents to list of nodes to tag.
703 # Add non-nullid parents to list of nodes to tag.
704 nodestotag.update([p for p in self.parents(n) if
704 nodestotag.update([p for p in self.parents(n) if
705 p != nullid])
705 p != nullid])
706 elif n in heads: # We've seen it before, is it a fake head?
706 elif n in heads: # We've seen it before, is it a fake head?
707 # So it is; real heads should not be the ancestors of
707 # So it is; real heads should not be the ancestors of
708 # any other heads.
708 # any other heads.
709 heads.pop(n)
709 heads.pop(n)
710 if not ancestors:
710 if not ancestors:
711 return nonodes
711 return nonodes
712 # Now that we have our set of ancestors, we want to remove any
712 # Now that we have our set of ancestors, we want to remove any
713 # roots that are not ancestors.
713 # roots that are not ancestors.
714
714
715 # If one of the roots was nullid, everything is included anyway.
715 # If one of the roots was nullid, everything is included anyway.
716 if lowestrev > nullrev:
716 if lowestrev > nullrev:
717 # But since none was, recompute the lowest rev so as not to
717 # But since none was, recompute the lowest rev so as not to
718 # include roots that aren't ancestors.
718 # include roots that aren't ancestors.
719
719
720 # Filter out roots that aren't ancestors of heads
720 # Filter out roots that aren't ancestors of heads
721 roots = [n for n in roots if n in ancestors]
721 roots = [n for n in roots if n in ancestors]
722 # Recompute the lowest revision
722 # Recompute the lowest revision
723 if roots:
723 if roots:
724 lowestrev = min([self.rev(n) for n in roots])
724 lowestrev = min([self.rev(n) for n in roots])
725 else:
725 else:
726 # No more roots? Return empty list
726 # No more roots? Return empty list
727 return nonodes
727 return nonodes
728 else:
728 else:
729 # We are descending from nullid, and don't need to care about
729 # We are descending from nullid, and don't need to care about
730 # any other roots.
730 # any other roots.
731 lowestrev = nullrev
731 lowestrev = nullrev
732 roots = [nullid]
732 roots = [nullid]
733 # Transform our roots list into a set.
733 # Transform our roots list into a set.
734 descendents = set(roots)
734 descendents = set(roots)
735 # Also, keep the original roots so we can filter out roots that aren't
735 # Also, keep the original roots so we can filter out roots that aren't
736 # 'real' roots (i.e. are descended from other roots).
736 # 'real' roots (i.e. are descended from other roots).
737 roots = descendents.copy()
737 roots = descendents.copy()
738 # Our topologically sorted list of output nodes.
738 # Our topologically sorted list of output nodes.
739 orderedout = []
739 orderedout = []
740 # Don't start at nullid since we don't want nullid in our output list,
740 # Don't start at nullid since we don't want nullid in our output list,
741 # and if nullid shows up in descendents, empty parents will look like
741 # and if nullid shows up in descendents, empty parents will look like
742 # they're descendants.
742 # they're descendants.
743 for r in xrange(max(lowestrev, 0), highestrev + 1):
743 for r in xrange(max(lowestrev, 0), highestrev + 1):
744 n = self.node(r)
744 n = self.node(r)
745 isdescendent = False
745 isdescendent = False
746 if lowestrev == nullrev: # Everybody is a descendant of nullid
746 if lowestrev == nullrev: # Everybody is a descendant of nullid
747 isdescendent = True
747 isdescendent = True
748 elif n in descendents:
748 elif n in descendents:
749 # n is already a descendant
749 # n is already a descendant
750 isdescendent = True
750 isdescendent = True
751 # This check only needs to be done here because all the roots
751 # This check only needs to be done here because all the roots
752 # will start out marked as descendants before the loop.
752 # will start out marked as descendants before the loop.
753 if n in roots:
753 if n in roots:
754 # If n was a root, check if it's a 'real' root.
754 # If n was a root, check if it's a 'real' root.
755 p = tuple(self.parents(n))
755 p = tuple(self.parents(n))
756 # If any of its parents are descendants, it's not a root.
756 # If any of its parents are descendants, it's not a root.
757 if (p[0] in descendents) or (p[1] in descendents):
757 if (p[0] in descendents) or (p[1] in descendents):
758 roots.remove(n)
758 roots.remove(n)
759 else:
759 else:
760 p = tuple(self.parents(n))
760 p = tuple(self.parents(n))
761 # A node is a descendant if either of its parents is a
761 # A node is a descendant if either of its parents is a
762 # descendant. (We seeded the descendents set with the roots
762 # descendant. (We seeded the descendents set with the roots
763 # up there, remember?)
763 # up there, remember?)
764 if (p[0] in descendents) or (p[1] in descendents):
764 if (p[0] in descendents) or (p[1] in descendents):
765 descendents.add(n)
765 descendents.add(n)
766 isdescendent = True
766 isdescendent = True
767 if isdescendent and ((ancestors is None) or (n in ancestors)):
767 if isdescendent and ((ancestors is None) or (n in ancestors)):
768 # Only include nodes that are both descendants and ancestors.
768 # Only include nodes that are both descendants and ancestors.
769 orderedout.append(n)
769 orderedout.append(n)
770 if (ancestors is not None) and (n in heads):
770 if (ancestors is not None) and (n in heads):
771 # We're trying to figure out which heads are reachable
771 # We're trying to figure out which heads are reachable
772 # from roots.
772 # from roots.
773 # Mark this head as having been reached
773 # Mark this head as having been reached
774 heads[n] = 1
774 heads[n] = 1
775 elif ancestors is None:
775 elif ancestors is None:
776 # Otherwise, we're trying to discover the heads.
776 # Otherwise, we're trying to discover the heads.
777 # Assume this is a head because if it isn't, the next step
777 # Assume this is a head because if it isn't, the next step
778 # will eventually remove it.
778 # will eventually remove it.
779 heads[n] = 1
779 heads[n] = 1
780 # But, obviously its parents aren't.
780 # But, obviously its parents aren't.
781 for p in self.parents(n):
781 for p in self.parents(n):
782 heads.pop(p, None)
782 heads.pop(p, None)
783 heads = [n for n in heads.iterkeys() if heads[n] != 0]
783 heads = [n for n in heads.iterkeys() if heads[n] != 0]
784 roots = list(roots)
784 roots = list(roots)
785 assert orderedout
785 assert orderedout
786 assert roots
786 assert roots
787 assert heads
787 assert heads
788 return (orderedout, roots, heads)
788 return (orderedout, roots, heads)
789
789
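
# Illustrative sketch: stripped of its single-pass bookkeeping,
# nodesbetween() computes descendants(roots) & ancestors(heads) plus the
# surviving roots and heads. A naive restatement on integer revs, reusing
# the helpers sketched above:
def naive_nodesbetween(parentmap, roots, heads):
    anc = set(heads) | set(bfs_ancestors(parentmap, heads))
    desc = set(roots) | set(forward_descendants(parentmap, roots))
    between = sorted(desc & anc)  # topologically sorted, like element 0
    # a root is 'fake' if it descends from another root ...
    realroots = [r for r in roots
                 if not any(p in desc for p in parentmap[r])]
    # ... and a head survives only if some root can reach it
    realheads = [h for h in heads if h in desc]
    return between, realroots, realheads
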
790 def heads(self, start=None, stop=None):
790 def heads(self, start=None, stop=None):
791 """return the list of all nodes that have no children
791 """return the list of all nodes that have no children
792
792
793 if start is specified, only heads that are descendants of
793 if start is specified, only heads that are descendants of
794 start will be returned
794 start will be returned
795 if stop is specified, it will consider all the revs from stop
795 if stop is specified, it will consider all the revs from stop
796 as if they had no children
796 as if they had no children
797 """
797 """
798 if start is None and stop is None:
798 if start is None and stop is None:
799 count = len(self)
799 count = len(self)
800 if not count:
800 if not count:
801 return [nullid]
801 return [nullid]
802 ishead = [1] * (count + 1)
802 ishead = [1] * (count + 1)
803 index = self.index
803 index = self.index
804 for r in xrange(count):
804 for r in xrange(count):
805 e = index[r]
805 e = index[r]
806 ishead[e[5]] = ishead[e[6]] = 0
806 ishead[e[5]] = ishead[e[6]] = 0
807 return [self.node(r) for r in xrange(count) if ishead[r]]
807 return [self.node(r) for r in xrange(count) if ishead[r]]
808
808
809 if start is None:
809 if start is None:
810 start = nullid
810 start = nullid
811 if stop is None:
811 if stop is None:
812 stop = []
812 stop = []
813 stoprevs = set([self.rev(n) for n in stop])
813 stoprevs = set([self.rev(n) for n in stop])
814 startrev = self.rev(start)
814 startrev = self.rev(start)
815 reachable = set((startrev,))
815 reachable = set((startrev,))
816 heads = set((startrev,))
816 heads = set((startrev,))
817
817
818 parentrevs = self.parentrevs
818 parentrevs = self.parentrevs
819 for r in xrange(startrev + 1, len(self)):
819 for r in xrange(startrev + 1, len(self)):
820 for p in parentrevs(r):
820 for p in parentrevs(r):
821 if p in reachable:
821 if p in reachable:
822 if r not in stoprevs:
822 if r not in stoprevs:
823 reachable.add(r)
823 reachable.add(r)
824 heads.add(r)
824 heads.add(r)
825 if p in heads and p not in stoprevs:
825 if p in heads and p not in stoprevs:
826 heads.remove(p)
826 heads.remove(p)
827
827
828 return [self.node(r) for r in heads]
828 return [self.node(r) for r in heads]
829
829
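
# Illustrative sketch of the whole-revlog fast path in heads(): every rev
# starts flagged as a head and each entry clears its parents' flags. The
# index here is just a list of (p1, p2) tuples; parent -1 (nullrev) lands
# in the scratch slot at the end, exactly like ishead[e[5]] / ishead[e[6]].
def all_heads(parents):
    count = len(parents)
    ishead = [1] * (count + 1)  # extra slot absorbs nullrev parents
    for r in range(count):
        p1, p2 = parents[r]
        ishead[p1] = ishead[p2] = 0
    return [r for r in range(count) if ishead[r]]

# all_heads([(-1, -1), (0, -1), (0, -1)]) -> [1, 2]
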
830 def children(self, node):
830 def children(self, node):
831 """find the children of a given node"""
831 """find the children of a given node"""
832 c = []
832 c = []
833 p = self.rev(node)
833 p = self.rev(node)
834 for r in range(p + 1, len(self)):
834 for r in range(p + 1, len(self)):
835 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
835 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
836 if prevs:
836 if prevs:
837 for pr in prevs:
837 for pr in prevs:
838 if pr == p:
838 if pr == p:
839 c.append(self.node(r))
839 c.append(self.node(r))
840 elif p == nullrev:
840 elif p == nullrev:
841 c.append(self.node(r))
841 c.append(self.node(r))
842 return c
842 return c
843
843
844 def _match(self, id):
844 def _match(self, id):
845 if isinstance(id, (long, int)):
845 if isinstance(id, (long, int)):
846 # rev
846 # rev
847 return self.node(id)
847 return self.node(id)
848 if len(id) == 20:
848 if len(id) == 20:
849 # possibly a binary node
849 # possibly a binary node
850 # odds of a binary node being all hex in ASCII are 1 in 10**25
850 # odds of a binary node being all hex in ASCII are 1 in 10**25
851 try:
851 try:
852 node = id
852 node = id
853 self.rev(node) # quick search the index
853 self.rev(node) # quick search the index
854 return node
854 return node
855 except LookupError:
855 except LookupError:
856 pass # may be partial hex id
856 pass # may be partial hex id
857 try:
857 try:
858 # str(rev)
858 # str(rev)
859 rev = int(id)
859 rev = int(id)
860 if str(rev) != id:
860 if str(rev) != id:
861 raise ValueError
861 raise ValueError
862 if rev < 0:
862 if rev < 0:
863 rev = len(self) + rev
863 rev = len(self) + rev
864 if rev < 0 or rev >= len(self):
864 if rev < 0 or rev >= len(self):
865 raise ValueError
865 raise ValueError
866 return self.node(rev)
866 return self.node(rev)
867 except (ValueError, OverflowError):
867 except (ValueError, OverflowError):
868 pass
868 pass
869 if len(id) == 40:
869 if len(id) == 40:
870 try:
870 try:
871 # a full hex nodeid?
871 # a full hex nodeid?
872 node = bin(id)
872 node = bin(id)
873 self.rev(node)
873 self.rev(node)
874 return node
874 return node
875 except (TypeError, LookupError):
875 except (TypeError, LookupError):
876 pass
876 pass
877
877
878 def _partialmatch(self, id):
878 def _partialmatch(self, id):
879 if len(id) < 40:
879 if len(id) < 40:
880 try:
880 try:
881 # hex(node)[:...]
881 # hex(node)[:...]
882 l = len(id) / 2 # grab an even number of digits
882 l = len(id) / 2 # grab an even number of digits
883 bin_id = bin(id[:l*2])
883 bin_id = bin(id[:l*2])
884 nl = [n for n in self.nodemap if n[:l] == bin_id]
884 nl = [n for n in self.nodemap if n[:l] == bin_id]
885 nl = [n for n in nl if hex(n).startswith(id)]
885 nl = [n for n in nl if hex(n).startswith(id)]
886 if len(nl) > 0:
886 if len(nl) > 0:
887 if len(nl) == 1:
887 if len(nl) == 1:
888 return nl[0]
888 return nl[0]
889 raise LookupError(id, self.indexfile,
889 raise LookupError(id, self.indexfile,
890 _('ambiguous identifier'))
890 _('ambiguous identifier'))
891 return None
891 return None
892 except TypeError:
892 except TypeError:
893 pass
893 pass
894
894
895 def lookup(self, id):
895 def lookup(self, id):
896 """locate a node based on:
896 """locate a node based on:
897 - revision number or str(revision number)
897 - revision number or str(revision number)
898 - nodeid or subset of hex nodeid
898 - nodeid or subset of hex nodeid
899 """
899 """
900 n = self._match(id)
900 n = self._match(id)
901 if n is not None:
901 if n is not None:
902 return n
902 return n
903 n = self._partialmatch(id)
903 n = self._partialmatch(id)
904 if n:
904 if n:
905 return n
905 return n
906
906
907 raise LookupError(id, self.indexfile, _('no match found'))
907 raise LookupError(id, self.indexfile, _('no match found'))
908
908
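
# Illustrative sketch of the prefix resolution behind lookup(), assuming
# nodes is an iterable of full 40-char hex ids (the real _partialmatch()
# narrows the scan with binary prefixes before comparing hex):
def partial_lookup(nodes, prefix):
    matches = [n for n in nodes if n.startswith(prefix)]
    if not matches:
        return None  # caller raises "no match found"
    if len(matches) > 1:
        raise ValueError('ambiguous identifier: %s' % prefix)
    return matches[0]
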
909 def cmp(self, node, text):
909 def cmp(self, node, text):
910 """compare text with a given file revision"""
910 """compare text with a given file revision"""
911 p1, p2 = self.parents(node)
911 p1, p2 = self.parents(node)
912 return hash(text, p1, p2) != node
912 return hash(text, p1, p2) != node
913
913
914 def _addchunk(self, offset, data):
914 def _addchunk(self, offset, data):
915 o, d = self._chunkcache
915 o, d = self._chunkcache
916 # try to add to existing cache
916 # try to add to existing cache
917 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
917 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
918 self._chunkcache = o, d + data
918 self._chunkcache = o, d + data
919 else:
919 else:
920 self._chunkcache = offset, data
920 self._chunkcache = offset, data
921
921
922 def _loadchunk(self, offset, length, df=None):
922 def _loadchunk(self, offset, length, df=None):
923 if not df:
923 if not df:
924 if self._inline:
924 if self._inline:
925 df = self.opener(self.indexfile)
925 df = self.opener(self.indexfile)
926 else:
926 else:
927 df = self.opener(self.datafile)
927 df = self.opener(self.datafile)
928
928
929 readahead = max(65536, length)
929 readahead = max(65536, length)
930 df.seek(offset)
930 df.seek(offset)
931 d = df.read(readahead)
931 d = df.read(readahead)
932 self._addchunk(offset, d)
932 self._addchunk(offset, d)
933 if readahead > length:
933 if readahead > length:
934 return d[:length]
934 return d[:length]
935 return d
935 return d
936
936
937 def _getchunk(self, offset, length, df=None):
937 def _getchunk(self, offset, length, df=None):
938 o, d = self._chunkcache
938 o, d = self._chunkcache
939 l = len(d)
939 l = len(d)
940
940
941 # is it in the cache?
941 # is it in the cache?
942 cachestart = offset - o
942 cachestart = offset - o
943 cacheend = cachestart + length
943 cacheend = cachestart + length
944 if cachestart >= 0 and cacheend <= l:
944 if cachestart >= 0 and cacheend <= l:
945 if cachestart == 0 and cacheend == l:
945 if cachestart == 0 and cacheend == l:
946 return d # avoid a copy
946 return d # avoid a copy
947 return d[cachestart:cacheend]
947 return d[cachestart:cacheend]
948
948
949 return self._loadchunk(offset, length, df)
949 return self._loadchunk(offset, length, df)
950
950
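
# Illustrative sketch of the single-window read cache used above: hits are
# sliced out of the cached (offset, data) pair, misses read ahead at least
# 64 kB and replace the window. readat(offset, size) is a stand-in for the
# seek-and-read on the index or data file.
class ChunkCache(object):
    def __init__(self, readat):
        self._readat = readat
        self._cache = (0, '')

    def get(self, offset, length):
        o, d = self._cache
        start = offset - o
        if 0 <= start and start + length <= len(d):
            return d[start:start + length]  # cache hit
        data = self._readat(offset, max(65536, length))  # read ahead
        self._cache = (offset, data)
        return data[:length]
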
951 def _prime(self, startrev, endrev, df):
951 def _prime(self, startrev, endrev, df):
952 start = self.start(startrev)
952 start = self.start(startrev)
953 end = self.end(endrev)
953 end = self.end(endrev)
954 if self._inline:
954 if self._inline:
955 start += (startrev + 1) * self._io.size
955 start += (startrev + 1) * self._io.size
956 end += (startrev + 1) * self._io.size
956 end += (startrev + 1) * self._io.size
957 self._loadchunk(start, end - start, df)
957 self._loadchunk(start, end - start, df)
958
958
959 def chunk(self, rev, df=None):
959 def chunk(self, rev, df=None):
960 start, length = self.start(rev), self.length(rev)
960 start, length = self.start(rev), self.length(rev)
961 if self._inline:
961 if self._inline:
962 start += (rev + 1) * self._io.size
962 start += (rev + 1) * self._io.size
963 return decompress(self._getchunk(start, length, df))
963 return decompress(self._getchunk(start, length, df))
964
964
965 def revdiff(self, rev1, rev2):
965 def revdiff(self, rev1, rev2):
966 """return or calculate a delta between two revisions"""
966 """return or calculate a delta between two revisions"""
967 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
967 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
968 return self.chunk(rev2)
968 return self.chunk(rev2)
969
969
970 return mdiff.textdiff(self.revision(self.node(rev1)),
970 return mdiff.textdiff(self.revision(self.node(rev1)),
971 self.revision(self.node(rev2)))
971 self.revision(self.node(rev2)))
972
972
973 def revision(self, node):
973 def revision(self, node):
974 """return an uncompressed revision of a given node"""
974 """return an uncompressed revision of a given node"""
975 if node == nullid:
975 if node == nullid:
976 return ""
976 return ""
977 if self._cache and self._cache[0] == node:
977 if self._cache and self._cache[0] == node:
978 return str(self._cache[2])
978 return str(self._cache[2])
979
979
980 # look up what we need to read
980 # look up what we need to read
981 text = None
981 text = None
982 rev = self.rev(node)
982 rev = self.rev(node)
983 base = self.base(rev)
983 base = self.base(rev)
984
984
985 # check rev flags
985 # check rev flags
986 if self.index[rev][0] & 0xFFFF:
986 if self.index[rev][0] & 0xFFFF:
987 raise RevlogError(_('incompatible revision flag %x') %
987 raise RevlogError(_('incompatible revision flag %x') %
988 (self.index[rev][0] & 0xFFFF))
988 (self.index[rev][0] & 0xFFFF))
989
989
990 df = None
990 df = None
991
991
992 # do we have useful data cached?
992 # do we have useful data cached?
993 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
993 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
994 base = self._cache[1]
994 base = self._cache[1]
995 text = str(self._cache[2])
995 text = str(self._cache[2])
996 self._loadindex(base, rev + 1)
996 self._loadindex(base, rev + 1)
997 if not self._inline and rev > base + 1:
997 if not self._inline and rev > base + 1:
998 df = self.opener(self.datafile)
998 df = self.opener(self.datafile)
999 self._prime(base, rev, df)
999 self._prime(base, rev, df)
1000 else:
1000 else:
1001 self._loadindex(base, rev + 1)
1001 self._loadindex(base, rev + 1)
1002 if not self._inline and rev > base:
1002 if not self._inline and rev > base:
1003 df = self.opener(self.datafile)
1003 df = self.opener(self.datafile)
1004 self._prime(base, rev, df)
1004 self._prime(base, rev, df)
1005 text = self.chunk(base, df=df)
1005 text = self.chunk(base, df=df)
1006
1006
1007 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
1007 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
1008 text = mdiff.patches(text, bins)
1008 text = mdiff.patches(text, bins)
1009 p1, p2 = self.parents(node)
1009 p1, p2 = self.parents(node)
1010 if node != hash(text, p1, p2):
1010 if node != hash(text, p1, p2):
1011 raise RevlogError(_("integrity check failed on %s:%d")
1011 raise RevlogError(_("integrity check failed on %s:%d")
1012 % (self.datafile, rev))
1012 % (self.datafile, rev))
1013
1013
1014 self._cache = (node, rev, text)
1014 self._cache = (node, rev, text)
1015 return text
1015 return text
1016
1016
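
# Illustrative sketch of the reconstruction loop in revision(): a rev is
# its chain's base snapshot plus every delta from base+1 through rev.
# chunks[r] holds the decompressed chunk for rev r, bases[r] its chain
# base, and patch(text, delta) stands in for mdiff.patches().
def reconstruct(chunks, bases, rev, patch):
    base = bases[rev]
    text = chunks[base]  # full snapshot at the start of the chain
    for r in range(base + 1, rev + 1):
        text = patch(text, chunks[r])  # apply deltas in order
    return text
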
1017 def checkinlinesize(self, tr, fp=None):
1017 def checkinlinesize(self, tr, fp=None):
1018 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1018 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1019 return
1019 return
1020
1020
1021 trinfo = tr.find(self.indexfile)
1021 trinfo = tr.find(self.indexfile)
1022 if trinfo == None:
1022 if trinfo is None:
1023 raise RevlogError(_("%s not found in the transaction")
1023 raise RevlogError(_("%s not found in the transaction")
1024 % self.indexfile)
1024 % self.indexfile)
1025
1025
1026 trindex = trinfo[2]
1026 trindex = trinfo[2]
1027 dataoff = self.start(trindex)
1027 dataoff = self.start(trindex)
1028
1028
1029 tr.add(self.datafile, dataoff)
1029 tr.add(self.datafile, dataoff)
1030
1030
1031 if fp:
1031 if fp:
1032 fp.flush()
1032 fp.flush()
1033 fp.close()
1033 fp.close()
1034
1034
1035 df = self.opener(self.datafile, 'w')
1035 df = self.opener(self.datafile, 'w')
1036 try:
1036 try:
1037 calc = self._io.size
1037 calc = self._io.size
1038 for r in self:
1038 for r in self:
1039 start = self.start(r) + (r + 1) * calc
1039 start = self.start(r) + (r + 1) * calc
1040 length = self.length(r)
1040 length = self.length(r)
1041 d = self._getchunk(start, length)
1041 d = self._getchunk(start, length)
1042 df.write(d)
1042 df.write(d)
1043 finally:
1043 finally:
1044 df.close()
1044 df.close()
1045
1045
1046 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1046 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1047 self.version &= ~(REVLOGNGINLINEDATA)
1047 self.version &= ~(REVLOGNGINLINEDATA)
1048 self._inline = False
1048 self._inline = False
1049 for i in self:
1049 for i in self:
1050 e = self._io.packentry(self.index[i], self.node, self.version, i)
1050 e = self._io.packentry(self.index[i], self.node, self.version, i)
1051 fp.write(e)
1051 fp.write(e)
1052
1052
1053 # if we don't call rename, the temp file will never replace the
1053 # if we don't call rename, the temp file will never replace the
1054 # real index
1054 # real index
1055 fp.rename()
1055 fp.rename()
1056
1056
1057 tr.replace(self.indexfile, trindex * calc)
1057 tr.replace(self.indexfile, trindex * calc)
1058 self._chunkcache = (0, '')
1058 self._chunkcache = (0, '')
1059
1059
1060 def addrevision(self, text, transaction, link, p1, p2, d=None):
1060 def addrevision(self, text, transaction, link, p1, p2, d=None):
1061 """add a revision to the log
1061 """add a revision to the log
1062
1062
1063 text - the revision data to add
1063 text - the revision data to add
1064 transaction - the transaction object used for rollback
1064 transaction - the transaction object used for rollback
1065 link - the linkrev data to add
1065 link - the linkrev data to add
1066 p1, p2 - the parent nodeids of the revision
1066 p1, p2 - the parent nodeids of the revision
1067 d - an optional precomputed delta
1067 d - an optional precomputed delta
1068 """
1068 """
1069 dfh = None
1069 dfh = None
1070 if not self._inline:
1070 if not self._inline:
1071 dfh = self.opener(self.datafile, "a")
1071 dfh = self.opener(self.datafile, "a")
1072 ifh = self.opener(self.indexfile, "a+")
1072 ifh = self.opener(self.indexfile, "a+")
1073 try:
1073 try:
1074 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1074 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1075 finally:
1075 finally:
1076 if dfh:
1076 if dfh:
1077 dfh.close()
1077 dfh.close()
1078 ifh.close()
1078 ifh.close()
1079
1079
1080 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1080 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1081 node = hash(text, p1, p2)
1081 node = hash(text, p1, p2)
1082 if node in self.nodemap:
1082 if node in self.nodemap:
1083 return node
1083 return node
1084
1084
1085 curr = len(self)
1085 curr = len(self)
1086 prev = curr - 1
1086 prev = curr - 1
1087 base = self.base(prev)
1087 base = self.base(prev)
1088 offset = self.end(prev)
1088 offset = self.end(prev)
1089
1089
1090 if curr:
1090 if curr:
1091 if not d:
1091 if not d:
1092 ptext = self.revision(self.node(prev))
1092 ptext = self.revision(self.node(prev))
1093 d = mdiff.textdiff(ptext, text)
1093 d = mdiff.textdiff(ptext, text)
1094 data = compress(d)
1094 data = compress(d)
1095 l = len(data[1]) + len(data[0])
1095 l = len(data[1]) + len(data[0])
1096 dist = l + offset - self.start(base)
1096 dist = l + offset - self.start(base)
1097
1097
1098 # full versions are inserted when the needed deltas
1098 # full versions are inserted when the needed deltas
1099 # become comparable to the uncompressed text
1099 # become comparable to the uncompressed text
1100 if not curr or dist > len(text) * 2:
1100 if not curr or dist > len(text) * 2:
1101 data = compress(text)
1101 data = compress(text)
1102 l = len(data[1]) + len(data[0])
1102 l = len(data[1]) + len(data[0])
1103 base = curr
1103 base = curr
1104
1104
1105 e = (offset_type(offset, 0), l, len(text),
1105 e = (offset_type(offset, 0), l, len(text),
1106 base, link, self.rev(p1), self.rev(p2), node)
1106 base, link, self.rev(p1), self.rev(p2), node)
1107 self.index.insert(-1, e)
1107 self.index.insert(-1, e)
1108 self.nodemap[node] = curr
1108 self.nodemap[node] = curr
1109
1109
1110 entry = self._io.packentry(e, self.node, self.version, curr)
1110 entry = self._io.packentry(e, self.node, self.version, curr)
1111 if not self._inline:
1111 if not self._inline:
1112 transaction.add(self.datafile, offset)
1112 transaction.add(self.datafile, offset)
1113 transaction.add(self.indexfile, curr * len(entry))
1113 transaction.add(self.indexfile, curr * len(entry))
1114 if data[0]:
1114 if data[0]:
1115 dfh.write(data[0])
1115 dfh.write(data[0])
1116 dfh.write(data[1])
1116 dfh.write(data[1])
1117 dfh.flush()
1117 dfh.flush()
1118 ifh.write(entry)
1118 ifh.write(entry)
1119 else:
1119 else:
1120 offset += curr * self._io.size
1120 offset += curr * self._io.size
1121 transaction.add(self.indexfile, offset, curr)
1121 transaction.add(self.indexfile, offset, curr)
1122 ifh.write(entry)
1122 ifh.write(entry)
1123 ifh.write(data[0])
1123 ifh.write(data[0])
1124 ifh.write(data[1])
1124 ifh.write(data[1])
1125 self.checkinlinesize(transaction, ifh)
1125 self.checkinlinesize(transaction, ifh)
1126
1126
1127 self._cache = (node, curr, text)
1127 self._cache = (node, curr, text)
1128 return node
1128 return node
1129
1129
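
# Illustrative sketch of the snapshot heuristic in _addrevision(): once
# the bytes needed to rebuild a rev (snapshot plus all deltas on the
# chain, including the new one) exceed twice the new text, store a full
# version instead of another delta.
def should_snapshot(chainbytes, deltalen, textlen):
    return chainbytes + deltalen > 2 * textlen

# should_snapshot(900, 300, 500) -> True: reading 1200 bytes to rebuild
# 500 bytes is past the 2x bound, so a fresh snapshot is cheaper overall.
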
1130 def ancestor(self, a, b):
1130 def ancestor(self, a, b):
1131 """calculate the least common ancestor of nodes a and b"""
1131 """calculate the least common ancestor of nodes a and b"""
1132
1132
1133 def parents(rev):
1133 def parents(rev):
1134 return [p for p in self.parentrevs(rev) if p != nullrev]
1134 return [p for p in self.parentrevs(rev) if p != nullrev]
1135
1135
1136 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1136 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1137 if c is None:
1137 if c is None:
1138 return nullid
1138 return nullid
1139
1139
1140 return self.node(c)
1140 return self.node(c)
1141
1141
1142 def group(self, nodelist, lookup, infocollect=None):
1142 def group(self, nodelist, lookup, infocollect=None):
1143 """calculate a delta group
1143 """calculate a delta group
1144
1144
1145 Given a list of changeset revs, return a set of deltas and
1145 Given a list of changeset revs, return a set of deltas and
1146 metadata corresponding to nodes. The first delta is
1146 metadata corresponding to nodes. The first delta is
1147 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1147 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1148 have this parent as it has all history before these
1148 have this parent as it has all history before these
1149 changesets. Here parent means parents[0].
1149 changesets. Here parent means parents[0].
1150 """
1150 """
1151
1151
1152 # if we don't have any revisions touched by these changesets, bail
1152 # if we don't have any revisions touched by these changesets, bail
1153 if not nodelist:
1153 if not nodelist:
1154 yield changegroup.closechunk()
1154 yield changegroup.closechunk()
1155 return
1155 return
1156
1156
1157 revs = [self.rev(n) for n in nodelist]
1157 revs = [self.rev(n) for n in nodelist]
1158
1158
1159 # add the parent of the first rev
1159 # add the parent of the first rev
1160 p = self.parentrevs(revs[0])[0]
1160 p = self.parentrevs(revs[0])[0]
1161 revs.insert(0, p)
1161 revs.insert(0, p)
1162
1162
1163 # build deltas
1163 # build deltas
1164 for d in xrange(0, len(revs) - 1):
1164 for d in xrange(0, len(revs) - 1):
1165 a, b = revs[d], revs[d + 1]
1165 a, b = revs[d], revs[d + 1]
1166 nb = self.node(b)
1166 nb = self.node(b)
1167
1167
1168 if infocollect is not None:
1168 if infocollect is not None:
1169 infocollect(nb)
1169 infocollect(nb)
1170
1170
1171 p = self.parents(nb)
1171 p = self.parents(nb)
1172 meta = nb + p[0] + p[1] + lookup(nb)
1172 meta = nb + p[0] + p[1] + lookup(nb)
1173 if a == -1:
1173 if a == -1:
1174 d = self.revision(nb)
1174 d = self.revision(nb)
1175 meta += mdiff.trivialdiffheader(len(d))
1175 meta += mdiff.trivialdiffheader(len(d))
1176 else:
1176 else:
1177 d = self.revdiff(a, b)
1177 d = self.revdiff(a, b)
1178 yield changegroup.chunkheader(len(meta) + len(d))
1178 yield changegroup.chunkheader(len(meta) + len(d))
1179 yield meta
1179 yield meta
1180 if len(d) > 2**20:
1180 if len(d) > 2**20:
1181 pos = 0
1181 pos = 0
1182 while pos < len(d):
1182 while pos < len(d):
1183 pos2 = pos + 2 ** 18
1183 pos2 = pos + 2 ** 18
1184 yield d[pos:pos2]
1184 yield d[pos:pos2]
1185 pos = pos2
1185 pos = pos2
1186 else:
1186 else:
1187 yield d
1187 yield d
1188
1188
1189 yield changegroup.closechunk()
1189 yield changegroup.closechunk()
1190
1190
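
# Illustrative sketch of the wire framing group() emits per revision, as
# the chunkheader()/closechunk() calls suggest: a 4-byte big-endian length
# that counts itself, 80 bytes of metadata (node, both parents, the lookup
# result), then the delta; a zero length terminates the stream.
import struct

def frame_chunk(node, p1, p2, linkinfo, delta):
    meta = node + p1 + p2 + linkinfo  # four 20-byte binary ids
    header = struct.pack(">l", 4 + len(meta) + len(delta))
    return header + meta + delta

CLOSE_CHUNK = struct.pack(">l", 0)
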
1191 def addgroup(self, revs, linkmapper, transaction):
1191 def addgroup(self, revs, linkmapper, transaction):
1192 """
1192 """
1193 add a delta group
1193 add a delta group
1194
1194
1195 Given a set of deltas, add them to the revision log. The
1195 Given a set of deltas, add them to the revision log. The
1196 first delta is against its parent, which should be in our
1196 first delta is against its parent, which should be in our
1197 log; the rest are against the previous delta.
1197 log; the rest are against the previous delta.
1198 """
1198 """
1199
1199
1200 # track the base of the current delta log
1200 # track the base of the current delta log
1201 r = len(self)
1201 r = len(self)
1202 t = r - 1
1202 t = r - 1
1203 node = None
1203 node = None
1204
1204
1205 base = prev = nullrev
1205 base = prev = nullrev
1206 start = end = textlen = 0
1206 start = end = textlen = 0
1207 if r:
1207 if r:
1208 end = self.end(t)
1208 end = self.end(t)
1209
1209
1210 ifh = self.opener(self.indexfile, "a+")
1210 ifh = self.opener(self.indexfile, "a+")
1211 isize = r * self._io.size
1211 isize = r * self._io.size
1212 if self._inline:
1212 if self._inline:
1213 transaction.add(self.indexfile, end + isize, r)
1213 transaction.add(self.indexfile, end + isize, r)
1214 dfh = None
1214 dfh = None
1215 else:
1215 else:
1216 transaction.add(self.indexfile, isize, r)
1216 transaction.add(self.indexfile, isize, r)
1217 transaction.add(self.datafile, end)
1217 transaction.add(self.datafile, end)
1218 dfh = self.opener(self.datafile, "a")
1218 dfh = self.opener(self.datafile, "a")
1219
1219
1220 try:
1220 try:
1221 # loop through our set of deltas
1221 # loop through our set of deltas
1222 chain = None
1222 chain = None
1223 for chunk in revs:
1223 for chunk in revs:
1224 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1224 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1225 link = linkmapper(cs)
1225 link = linkmapper(cs)
1226 if node in self.nodemap:
1226 if node in self.nodemap:
1227 # this can happen if two branches make the same change
1227 # this can happen if two branches make the same change
1228 chain = node
1228 chain = node
1229 continue
1229 continue
1230 delta = buffer(chunk, 80)
1230 delta = buffer(chunk, 80)
1231 del chunk
1231 del chunk
1232
1232
1233 for p in (p1, p2):
1233 for p in (p1, p2):
1234 if p not in self.nodemap:
1234 if p not in self.nodemap:
1235 raise LookupError(p, self.indexfile, _('unknown parent'))
1235 raise LookupError(p, self.indexfile, _('unknown parent'))
1236
1236
1237 if not chain:
1237 if not chain:
1238 # retrieve the parent revision of the delta chain
1238 # retrieve the parent revision of the delta chain
1239 chain = p1
1239 chain = p1
1240 if chain not in self.nodemap:
1240 if chain not in self.nodemap:
1241 raise LookupError(chain, self.indexfile, _('unknown base'))
1241 raise LookupError(chain, self.indexfile, _('unknown base'))
1242
1242
1243 # full versions are inserted when the needed deltas become
1243 # full versions are inserted when the needed deltas become
1244 # comparable to the uncompressed text or when the previous
1244 # comparable to the uncompressed text or when the previous
1245 # version is not the one we have a delta against. We use
1245 # version is not the one we have a delta against. We use
1246 # the size of the previous full rev as a proxy for the
1246 # the size of the previous full rev as a proxy for the
1247 # current size.
1247 # current size.
1248
1248
1249 if chain == prev:
1249 if chain == prev:
1250 cdelta = compress(delta)
1250 cdelta = compress(delta)
1251 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1251 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1252 textlen = mdiff.patchedsize(textlen, delta)
1252 textlen = mdiff.patchedsize(textlen, delta)
1253
1253
1254 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1254 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1255 # flush our writes here so we can read it in revision
1255 # flush our writes here so we can read it in revision
1256 if dfh:
1256 if dfh:
1257 dfh.flush()
1257 dfh.flush()
1258 ifh.flush()
1258 ifh.flush()
1259 text = self.revision(chain)
1259 text = self.revision(chain)
1260 if len(text) == 0:
1260 if len(text) == 0:
1261 # skip over trivial delta header
1261 # skip over trivial delta header
1262 text = buffer(delta, 12)
1262 text = buffer(delta, 12)
1263 else:
1263 else:
1264 text = mdiff.patches(text, [delta])
1264 text = mdiff.patches(text, [delta])
1265 del delta
1265 del delta
1266 chk = self._addrevision(text, transaction, link, p1, p2, None,
1266 chk = self._addrevision(text, transaction, link, p1, p2, None,
1267 ifh, dfh)
1267 ifh, dfh)
1268 if not dfh and not self._inline:
1268 if not dfh and not self._inline:
1269 # addrevision switched from inline to conventional
1269 # addrevision switched from inline to conventional
1270 # reopen the index
1270 # reopen the index
1271 dfh = self.opener(self.datafile, "a")
1271 dfh = self.opener(self.datafile, "a")
1272 ifh = self.opener(self.indexfile, "a")
1272 ifh = self.opener(self.indexfile, "a")
1273 if chk != node:
1273 if chk != node:
1274 raise RevlogError(_("consistency error adding group"))
1274 raise RevlogError(_("consistency error adding group"))
1275 textlen = len(text)
1275 textlen = len(text)
1276 else:
1276 else:
1277 e = (offset_type(end, 0), cdeltalen, textlen, base,
1277 e = (offset_type(end, 0), cdeltalen, textlen, base,
1278 link, self.rev(p1), self.rev(p2), node)
1278 link, self.rev(p1), self.rev(p2), node)
1279 self.index.insert(-1, e)
1279 self.index.insert(-1, e)
1280 self.nodemap[node] = r
1280 self.nodemap[node] = r
1281 entry = self._io.packentry(e, self.node, self.version, r)
1281 entry = self._io.packentry(e, self.node, self.version, r)
1282 if self._inline:
1282 if self._inline:
1283 ifh.write(entry)
1283 ifh.write(entry)
1284 ifh.write(cdelta[0])
1284 ifh.write(cdelta[0])
1285 ifh.write(cdelta[1])
1285 ifh.write(cdelta[1])
1286 self.checkinlinesize(transaction, ifh)
1286 self.checkinlinesize(transaction, ifh)
1287 if not self._inline:
1287 if not self._inline:
1288 dfh = self.opener(self.datafile, "a")
1288 dfh = self.opener(self.datafile, "a")
1289 ifh = self.opener(self.indexfile, "a")
1289 ifh = self.opener(self.indexfile, "a")
1290 else:
1290 else:
1291 dfh.write(cdelta[0])
1291 dfh.write(cdelta[0])
1292 dfh.write(cdelta[1])
1292 dfh.write(cdelta[1])
1293 ifh.write(entry)
1293 ifh.write(entry)
1294
1294
1295 t, r, chain, prev = r, r + 1, node, node
1295 t, r, chain, prev = r, r + 1, node, node
1296 base = self.base(t)
1296 base = self.base(t)
1297 start = self.start(base)
1297 start = self.start(base)
1298 end = self.end(t)
1298 end = self.end(t)
1299 finally:
1299 finally:
1300 if dfh:
1300 if dfh:
1301 dfh.close()
1301 dfh.close()
1302 ifh.close()
1302 ifh.close()
1303
1303
1304 return node
1304 return node
1305
1305
1306 def strip(self, minlink, transaction):
1306 def strip(self, minlink, transaction):
1307 """truncate the revlog on the first revision with a linkrev >= minlink
1307 """truncate the revlog on the first revision with a linkrev >= minlink
1308
1308
1309 This function is called when we're stripping revision minlink and
1309 This function is called when we're stripping revision minlink and
1310 its descendants from the repository.
1310 its descendants from the repository.
1311
1311
1312 We have to remove all revisions with linkrev >= minlink, because
1312 We have to remove all revisions with linkrev >= minlink, because
1313 the equivalent changelog revisions will be renumbered after the
1313 the equivalent changelog revisions will be renumbered after the
1314 strip.
1314 strip.
1315
1315
1316 So we truncate the revlog on the first of these revisions, and
1316 So we truncate the revlog on the first of these revisions, and
1317 trust that the caller has saved the revisions that shouldn't be
1317 trust that the caller has saved the revisions that shouldn't be
1318 removed and that it'll re-add them after this truncation.
1318 removed and that it'll re-add them after this truncation.
1319 """
1319 """
1320 if len(self) == 0:
1320 if len(self) == 0:
1321 return
1321 return
1322
1322
1323 if isinstance(self.index, lazyindex):
1323 if isinstance(self.index, lazyindex):
1324 self._loadindexmap()
1324 self._loadindexmap()
1325
1325
1326 for rev in self:
1326 for rev in self:
1327 if self.index[rev][4] >= minlink:
1327 if self.index[rev][4] >= minlink:
1328 break
1328 break
1329 else:
1329 else:
1330 return
1330 return
1331
1331
1332 # first truncate the files on disk
1332 # first truncate the files on disk
1333 end = self.start(rev)
1333 end = self.start(rev)
1334 if not self._inline:
1334 if not self._inline:
1335 transaction.add(self.datafile, end)
1335 transaction.add(self.datafile, end)
1336 end = rev * self._io.size
1336 end = rev * self._io.size
1337 else:
1337 else:
1338 end += rev * self._io.size
1338 end += rev * self._io.size
1339
1339
1340 transaction.add(self.indexfile, end)
1340 transaction.add(self.indexfile, end)
1341
1341
1342 # then reset internal state in memory to forget those revisions
1342 # then reset internal state in memory to forget those revisions
1343 self._cache = None
1343 self._cache = None
1344 self._chunkcache = (0, '')
1344 self._chunkcache = (0, '')
1345 for x in xrange(rev, len(self)):
1345 for x in xrange(rev, len(self)):
1346 del self.nodemap[self.node(x)]
1346 del self.nodemap[self.node(x)]
1347
1347
1348 del self.index[rev:-1]
1348 del self.index[rev:-1]
1349
1349
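
# Illustrative sketch of the truncation offsets strip() registers with the
# transaction. entrysize is the fixed index entry size (self._io.size) and
# datastart the first byte of rev's data chunk (self.start(rev)). Returns
# (data file cut point or None, index file cut point).
def truncation_points(rev, inline, entrysize, datastart):
    if not inline:
        # separate files: cut the data file at rev's first byte and the
        # index after rev fixed-size entries
        return datastart, rev * entrysize
    # inline: entries and data chunks interleave in the index file
    return None, datastart + rev * entrysize
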
1350 def checksize(self):
1350 def checksize(self):
1351 expected = 0
1351 expected = 0
1352 if len(self):
1352 if len(self):
1353 expected = max(0, self.end(len(self) - 1))
1353 expected = max(0, self.end(len(self) - 1))
1354
1354
1355 try:
1355 try:
1356 f = self.opener(self.datafile)
1356 f = self.opener(self.datafile)
1357 f.seek(0, 2)
1357 f.seek(0, 2)
1358 actual = f.tell()
1358 actual = f.tell()
1359 dd = actual - expected
1359 dd = actual - expected
1360 except IOError, inst:
1360 except IOError, inst:
1361 if inst.errno != errno.ENOENT:
1361 if inst.errno != errno.ENOENT:
1362 raise
1362 raise
1363 dd = 0
1363 dd = 0
1364
1364
1365 try:
1365 try:
1366 f = self.opener(self.indexfile)
1366 f = self.opener(self.indexfile)
1367 f.seek(0, 2)
1367 f.seek(0, 2)
1368 actual = f.tell()
1368 actual = f.tell()
1369 s = self._io.size
1369 s = self._io.size
1370 i = max(0, actual / s)
1370 i = max(0, actual / s)
1371 di = actual - (i * s)
1371 di = actual - (i * s)
1372 if self._inline:
1372 if self._inline:
1373 databytes = 0
1373 databytes = 0
1374 for r in self:
1374 for r in self:
1375 databytes += max(0, self.length(r))
1375 databytes += max(0, self.length(r))
1376 dd = 0
1376 dd = 0
1377 di = actual - len(self) * s - databytes
1377 di = actual - len(self) * s - databytes
1378 except IOError, inst:
1378 except IOError, inst:
1379 if inst.errno != errno.ENOENT:
1379 if inst.errno != errno.ENOENT:
1380 raise
1380 raise
1381 di = 0
1381 di = 0
1382
1382
1383 return (dd, di)
1383 return (dd, di)
1384
1384
1385 def files(self):
1385 def files(self):
1386 res = [self.indexfile]
1386 res = [self.indexfile]
1387 if not self._inline:
1387 if not self._inline:
1388 res.append(self.datafile)
1388 res.append(self.datafile)
1389 return res
1389 return res
@@ -1,343 +1,343 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 import errno, getpass, os, re, socket, sys, tempfile, traceback
9 import errno, getpass, os, re, socket, sys, tempfile, traceback
10 import config, util, error
10 import config, util, error
11
11
12 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True,
12 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True,
13 '0': False, 'no': False, 'false': False, 'off': False}
13 '0': False, 'no': False, 'false': False, 'off': False}
14
14
15 class ui(object):
15 class ui(object):
16 def __init__(self, src=None):
16 def __init__(self, src=None):
17 self._buffers = []
17 self._buffers = []
18 self.quiet = self.verbose = self.debugflag = self._traceback = False
18 self.quiet = self.verbose = self.debugflag = self._traceback = False
19 self._reportuntrusted = True
19 self._reportuntrusted = True
20 self._ocfg = config.config() # overlay
20 self._ocfg = config.config() # overlay
21 self._tcfg = config.config() # trusted
21 self._tcfg = config.config() # trusted
22 self._ucfg = config.config() # untrusted
22 self._ucfg = config.config() # untrusted
23 self._trustusers = set()
23 self._trustusers = set()
24 self._trustgroups = set()
24 self._trustgroups = set()
25
25
26 if src:
26 if src:
27 self._tcfg = src._tcfg.copy()
27 self._tcfg = src._tcfg.copy()
28 self._ucfg = src._ucfg.copy()
28 self._ucfg = src._ucfg.copy()
29 self._ocfg = src._ocfg.copy()
29 self._ocfg = src._ocfg.copy()
30 self._trustusers = src._trustusers.copy()
30 self._trustusers = src._trustusers.copy()
31 self._trustgroups = src._trustgroups.copy()
31 self._trustgroups = src._trustgroups.copy()
32 self.fixconfig()
32 self.fixconfig()
33 else:
33 else:
34 # we always trust global config files
34 # we always trust global config files
35 for f in util.rcpath():
35 for f in util.rcpath():
36 self.readconfig(f, trust=True)
36 self.readconfig(f, trust=True)
37
37
38 def copy(self):
38 def copy(self):
39 return self.__class__(self)
39 return self.__class__(self)
40
40
41 def _is_trusted(self, fp, f):
41 def _is_trusted(self, fp, f):
42 st = util.fstat(fp)
42 st = util.fstat(fp)
43 if util.isowner(fp, st):
43 if util.isowner(fp, st):
44 return True
44 return True
45
45
46 tusers, tgroups = self._trustusers, self._trustgroups
46 tusers, tgroups = self._trustusers, self._trustgroups
47 if '*' in tusers or '*' in tgroups:
47 if '*' in tusers or '*' in tgroups:
48 return True
48 return True
49
49
50 user = util.username(st.st_uid)
50 user = util.username(st.st_uid)
51 group = util.groupname(st.st_gid)
51 group = util.groupname(st.st_gid)
52 if user in tusers or group in tgroups or user == util.username():
52 if user in tusers or group in tgroups or user == util.username():
53 return True
53 return True
54
54
55 if self._reportuntrusted:
55 if self._reportuntrusted:
56 self.warn(_('Not trusting file %s from untrusted '
56 self.warn(_('Not trusting file %s from untrusted '
57 'user %s, group %s\n') % (f, user, group))
57 'user %s, group %s\n') % (f, user, group))
58 return False
58 return False
59
59
60 def readconfig(self, filename, root=None, trust=False,
60 def readconfig(self, filename, root=None, trust=False,
61 sections=None, remap=None):
61 sections=None, remap=None):
62 try:
62 try:
63 fp = open(filename)
63 fp = open(filename)
64 except IOError:
64 except IOError:
65 if not sections: # ignore unless we were looking for something
65 if not sections: # ignore unless we were looking for something
66 return
66 return
67 raise
67 raise
68
68
69 cfg = config.config()
69 cfg = config.config()
70 trusted = sections or trust or self._is_trusted(fp, filename)
70 trusted = sections or trust or self._is_trusted(fp, filename)
71
71
72 try:
72 try:
73 cfg.read(filename, fp, sections=sections, remap=remap)
73 cfg.read(filename, fp, sections=sections, remap=remap)
74 except error.ConfigError, inst:
74 except error.ConfigError, inst:
75 if trusted:
75 if trusted:
76 raise
76 raise
77 self.warn(_("Ignored: %s\n") % str(inst))
77 self.warn(_("Ignored: %s\n") % str(inst))
78
78
79 if trusted:
79 if trusted:
80 self._tcfg.update(cfg)
80 self._tcfg.update(cfg)
81 self._tcfg.update(self._ocfg)
81 self._tcfg.update(self._ocfg)
82 self._ucfg.update(cfg)
82 self._ucfg.update(cfg)
83 self._ucfg.update(self._ocfg)
83 self._ucfg.update(self._ocfg)
84
84
85 if root is None:
85 if root is None:
86 root = os.path.expanduser('~')
86 root = os.path.expanduser('~')
87 self.fixconfig(root=root)
87 self.fixconfig(root=root)
88
88
89 def fixconfig(self, root=None):
89 def fixconfig(self, root=None):
90 # translate paths relative to root (or home) into absolute paths
90 # translate paths relative to root (or home) into absolute paths
91 root = root or os.getcwd()
91 root = root or os.getcwd()
92 for c in self._tcfg, self._ucfg, self._ocfg:
92 for c in self._tcfg, self._ucfg, self._ocfg:
93 for n, p in c.items('paths'):
93 for n, p in c.items('paths'):
94 if p and "://" not in p and not os.path.isabs(p):
94 if p and "://" not in p and not os.path.isabs(p):
95 c.set("paths", n, os.path.normpath(os.path.join(root, p)))
95 c.set("paths", n, os.path.normpath(os.path.join(root, p)))
96
96
97 # update ui options
97 # update ui options
98 self.debugflag = self.configbool('ui', 'debug')
98 self.debugflag = self.configbool('ui', 'debug')
99 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
99 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
100 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
100 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
101 if self.verbose and self.quiet:
101 if self.verbose and self.quiet:
102 self.quiet = self.verbose = False
102 self.quiet = self.verbose = False
103 self._reportuntrusted = self.configbool("ui", "report_untrusted", True)
103 self._reportuntrusted = self.configbool("ui", "report_untrusted", True)
104 self._traceback = self.configbool('ui', 'traceback', False)
104 self._traceback = self.configbool('ui', 'traceback', False)
105
105
106 # update trust information
106 # update trust information
107 self._trustusers.update(self.configlist('trusted', 'users'))
107 self._trustusers.update(self.configlist('trusted', 'users'))
108 self._trustgroups.update(self.configlist('trusted', 'groups'))
108 self._trustgroups.update(self.configlist('trusted', 'groups'))
109
109
110 def setconfig(self, section, name, value):
110 def setconfig(self, section, name, value):
111 for cfg in (self._ocfg, self._tcfg, self._ucfg):
111 for cfg in (self._ocfg, self._tcfg, self._ucfg):
112 cfg.set(section, name, value)
112 cfg.set(section, name, value)
113 self.fixconfig()
113 self.fixconfig()
114
114
115 def _data(self, untrusted):
115 def _data(self, untrusted):
116 return untrusted and self._ucfg or self._tcfg
116 return untrusted and self._ucfg or self._tcfg
117
117
118 def configsource(self, section, name, untrusted=False):
118 def configsource(self, section, name, untrusted=False):
119 return self._data(untrusted).source(section, name) or 'none'
119 return self._data(untrusted).source(section, name) or 'none'
120
120
121 def config(self, section, name, default=None, untrusted=False):
121 def config(self, section, name, default=None, untrusted=False):
122 value = self._data(untrusted).get(section, name, default)
122 value = self._data(untrusted).get(section, name, default)
123 if self.debugflag and not untrusted and self._reportuntrusted:
123 if self.debugflag and not untrusted and self._reportuntrusted:
124 uvalue = self._ucfg.get(section, name)
124 uvalue = self._ucfg.get(section, name)
125 if uvalue is not None and uvalue != value:
125 if uvalue is not None and uvalue != value:
126 self.debug(_("ignoring untrusted configuration option "
126 self.debug(_("ignoring untrusted configuration option "
127 "%s.%s = %s\n") % (section, name, uvalue))
127 "%s.%s = %s\n") % (section, name, uvalue))
128 return value
128 return value
129
129
130 def configbool(self, section, name, default=False, untrusted=False):
130 def configbool(self, section, name, default=False, untrusted=False):
131 v = self.config(section, name, None, untrusted)
131 v = self.config(section, name, None, untrusted)
132 if v == None:
132 if v is None:
133 return default
133 return default
134 if v.lower() not in _booleans:
134 if v.lower() not in _booleans:
135 raise error.ConfigError(_("%s.%s not a boolean ('%s')")
135 raise error.ConfigError(_("%s.%s not a boolean ('%s')")
136 % (section, name, v))
136 % (section, name, v))
137 return _booleans[v.lower()]
137 return _booleans[v.lower()]
138
138
139 def configlist(self, section, name, default=None, untrusted=False):
139 def configlist(self, section, name, default=None, untrusted=False):
140 """Return a list of comma/space separated strings"""
140 """Return a list of comma/space separated strings"""
141 result = self.config(section, name, untrusted=untrusted)
141 result = self.config(section, name, untrusted=untrusted)
142 if result is None:
142 if result is None:
143 result = default or []
143 result = default or []
144 if isinstance(result, basestring):
144 if isinstance(result, basestring):
145 result = result.replace(",", " ").split()
145 result = result.replace(",", " ").split()
146 return result
146 return result
147
147
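
# Illustrative sketch of the two value parsers above, pulled out of the
# ui class: booleans go through the _booleans table, lists split on both
# commas and whitespace.
def parsebool(v, table=_booleans):
    if v is None:
        return False  # matches configbool's default for an unset key
    if v.lower() not in table:
        raise ValueError('not a boolean: %r' % v)
    return table[v.lower()]

def parselist(v):
    if v is None:
        return []
    return v.replace(',', ' ').split()

# parsebool('Yes') -> True
# parselist('foo, bar baz') -> ['foo', 'bar', 'baz']
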
148 def has_section(self, section, untrusted=False):
148 def has_section(self, section, untrusted=False):
149 '''tell whether section exists in config.'''
149 '''tell whether section exists in config.'''
150 return section in self._data(untrusted)
150 return section in self._data(untrusted)
151
151
152 def configitems(self, section, untrusted=False):
152 def configitems(self, section, untrusted=False):
153 items = self._data(untrusted).items(section)
153 items = self._data(untrusted).items(section)
154 if self.debugflag and not untrusted and self._reportuntrusted:
154 if self.debugflag and not untrusted and self._reportuntrusted:
155 for k, v in self._ucfg.items(section):
155 for k, v in self._ucfg.items(section):
156 if self._tcfg.get(section, k) != v:
156 if self._tcfg.get(section, k) != v:
157 self.debug(_("ignoring untrusted configuration option "
157 self.debug(_("ignoring untrusted configuration option "
158 "%s.%s = %s\n") % (section, k, v))
158 "%s.%s = %s\n") % (section, k, v))
159 return items
159 return items
160
160
161 def walkconfig(self, untrusted=False):
161 def walkconfig(self, untrusted=False):
162 cfg = self._data(untrusted)
162 cfg = self._data(untrusted)
163 for section in cfg.sections():
163 for section in cfg.sections():
164 for name, value in self.configitems(section, untrusted):
164 for name, value in self.configitems(section, untrusted):
165 yield section, name, str(value).replace('\n', '\\n')
165 yield section, name, str(value).replace('\n', '\\n')
166
166
167 def username(self):
167 def username(self):
168 """Return default username to be used in commits.
168 """Return default username to be used in commits.
169
169
170 Searched in this order: $HGUSER, the [ui] section of hgrc files,
170 Searched in this order: $HGUSER, the [ui] section of hgrc files,
171 and $EMAIL; searching stops at the first one that is set.
171 and $EMAIL; searching stops at the first one that is set.
172 If none is found and ui.askusername is True, ask the user, else use
172 If none is found and ui.askusername is True, ask the user, else use
173 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
173 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
174 """
174 """
175 user = os.environ.get("HGUSER")
175 user = os.environ.get("HGUSER")
176 if user is None:
176 if user is None:
177 user = self.config("ui", "username")
177 user = self.config("ui", "username")
178 if user is None:
178 if user is None:
179 user = os.environ.get("EMAIL")
179 user = os.environ.get("EMAIL")
180 if user is None and self.configbool("ui", "askusername"):
180 if user is None and self.configbool("ui", "askusername"):
181 user = self.prompt(_("enter a commit username:"), default=None)
181 user = self.prompt(_("enter a commit username:"), default=None)
182 if user is None:
182 if user is None:
183 try:
183 try:
184 user = '%s@%s' % (util.getuser(), socket.getfqdn())
184 user = '%s@%s' % (util.getuser(), socket.getfqdn())
185 self.warn(_("No username found, using '%s' instead\n") % user)
185 self.warn(_("No username found, using '%s' instead\n") % user)
186 except KeyError:
186 except KeyError:
187 pass
187 pass
188 if not user:
188 if not user:
189 raise util.Abort(_("Please specify a username."))
189 raise util.Abort(_("Please specify a username."))
190 if "\n" in user:
190 if "\n" in user:
191 raise util.Abort(_("username %s contains a newline\n") % repr(user))
191 raise util.Abort(_("username %s contains a newline\n") % repr(user))
192 return user
192 return user
193
193
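    # Illustration of the resolution order documented above, using invented
    # values: if HGUSER is unset but an hgrc sets
    # ui.username = "Jane Doe <jane@example.com>", the hgrc value wins and
    # EMAIL is never consulted; only when all three are missing does the
    # code fall back to askusername or the user@fqdn guess.
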
    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose:
            user = util.shortuser(user)
        return user

    def _path(self, loc):
        p = self.config('paths', loc)
        if p and '%%' in p:
            # the class is named 'ui', so the original 'ui.warn(...)' called
            # an unbound method and would fail; 'self.warn' is intended
            self.warn('(deprecated \'%%\' in path %s=%s from %s)\n' %
                      (loc, p, self.configsource('paths', loc)))
            p = p.replace('%%', '%')
        return p

    def expandpath(self, loc, default=None):
        """Return repository location relative to cwd or from [paths]"""
        if "://" in loc or os.path.isdir(os.path.join(loc, '.hg')):
            return loc

        path = self._path(loc)
        if not path and default is not None:
            path = self._path(default)
        return path or loc

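    # Illustration (hypothetical [paths] section, not from the changeset):
    #     [paths]
    #     upstream = http://example.com/repo
    # expandpath('upstream')                 -> 'http://example.com/repo'
    # expandpath('http://other.example/r')   -> returned as-is ('://' check)
    # expandpath('missing', default='upstream') -> falls back to upstream.
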
    def pushbuffer(self):
        self._buffers.append([])

    def popbuffer(self):
        return "".join(self._buffers.pop())

    def write(self, *args):
        if self._buffers:
            self._buffers[-1].extend([str(a) for a in args])
        else:
            for a in args:
                sys.stdout.write(str(a))

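    # Sketch of the buffering protocol (editorial; assumes a ui instance u):
    #     u.pushbuffer()
    #     u.write("hello ", "world\n")
    #     text = u.popbuffer()   # -> "hello world\n"; nothing reached stdout
    # Since _buffers is a stack of lists, pushbuffer() calls nest, and each
    # popbuffer() returns only the output written since the matching push.
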
    def write_err(self, *args):
        try:
            if not sys.stdout.closed:
                sys.stdout.flush()
            for a in args:
                sys.stderr.write(str(a))
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if not sys.stderr.closed:
                sys.stderr.flush()
        except IOError, inst:
            if inst.errno != errno.EPIPE:
                raise

    def flush(self):
        try: sys.stdout.flush()
        except: pass
        try: sys.stderr.flush()
        except: pass

    def interactive(self):
        return self.configbool("ui", "interactive") or sys.stdin.isatty()

    def _readline(self, prompt=''):
        if sys.stdin.isatty():
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                pass
        line = raw_input(prompt)
        # When stdin is in binary mode on Windows, it can cause
        # raw_input() to emit an extra trailing carriage return
        if os.linesep == '\r\n' and line and line[-1] == '\r':
            line = line[:-1]
        return line

    def prompt(self, msg, choices=None, default="y"):
        """Prompt user with msg, read response, and ensure it matches
        one of the provided choices. choices is a sequence of acceptable
        responses with the format: ('&None', 'E&xec', 'Sym&link').
        No sequence implies no response checking. Responses are case
        insensitive. If ui is not interactive, the default is returned.
        """
        if not self.interactive():
            self.note(msg, ' ', default, "\n")
            return default
        while True:
            try:
                r = self._readline(msg + ' ')
                if not r:
                    return default
                if not choices:
                    return r
                resps = [s[s.index('&')+1].lower() for s in choices]
                if r.lower() in resps:
                    return r.lower()
                else:
                    self.write(_("unrecognized response\n"))
            except EOFError:
                raise util.Abort(_('response expected'))

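    # Illustration (editorial): the character after '&' in each choice is
    # the accepted response, so choices=('&None', 'E&xec', 'Sym&link')
    # accepts 'n', 'x' or 'l' (case-insensitively); anything else prints
    # "unrecognized response" and loops back to the prompt.
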
    def getpass(self, prompt=None, default=None):
        if not self.interactive():
            return default
        try:
            return getpass.getpass(prompt or _('password: '))
        except EOFError:
            raise util.Abort(_('response expected'))

    def status(self, *msg):
        if not self.quiet:
            self.write(*msg)

    def warn(self, *msg):
        self.write_err(*msg)

    def note(self, *msg):
        if self.verbose:
            self.write(*msg)

    def debug(self, *msg):
        if self.debugflag:
            self.write(*msg)

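    # Note (editorial): the four helpers above form the verbosity ladder --
    # status() is silenced when self.quiet is set, note() requires
    # self.verbose, debug() requires self.debugflag, and warn() always
    # reaches stderr via write_err().
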
    def edit(self, text, user):
        (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                      text=True)
        try:
            f = os.fdopen(fd, "w")
            f.write(text)
            f.close()

            editor = self.geteditor()

            util.system("%s \"%s\"" % (editor, name),
                        environ={'HGUSER': user},
                        onerr=util.Abort, errprefix=_("edit failed"))

            f = open(name)
            t = f.read()
            f.close()
        finally:
            os.unlink(name)

        return t

    def traceback(self):
        '''print exception traceback if traceback printing enabled.
        Only call this from an exception handler. Returns True if a
        traceback was printed.'''
        if self._traceback:
            traceback.print_exc()
        return self._traceback

    def geteditor(self):
        '''return editor to use'''
        return (os.environ.get("HGEDITOR") or
                self.config("ui", "editor") or
                os.environ.get("VISUAL") or
                os.environ.get("EDITOR", "vi"))
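
    # Note (editorial): the or-chain above means HGEDITOR always wins, then
    # the ui.editor config value, then VISUAL, then EDITOR, with 'vi' as the
    # final fallback when none of them is set.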