Show More
@@ -1,572 +1,578 b'' | |||||
1 | # Mercurial extension to provide the 'hg bookmark' command |
|
1 | # Mercurial extension to provide the 'hg bookmark' command | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 David Soria Parra <dsp@php.net> |
|
3 | # Copyright 2008 David Soria Parra <dsp@php.net> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''track a line of development with movable markers |
|
8 | '''track a line of development with movable markers | |
9 |
|
9 | |||
10 | Bookmarks are local movable markers to changesets. Every bookmark |
|
10 | Bookmarks are local movable markers to changesets. Every bookmark | |
11 | points to a changeset identified by its hash. If you commit a |
|
11 | points to a changeset identified by its hash. If you commit a | |
12 | changeset that is based on a changeset that has a bookmark on it, the |
|
12 | changeset that is based on a changeset that has a bookmark on it, the | |
13 | bookmark shifts to the new changeset. |
|
13 | bookmark shifts to the new changeset. | |
14 |
|
14 | |||
15 | It is possible to use bookmark names in every revision lookup (e.g. |
|
15 | It is possible to use bookmark names in every revision lookup (e.g. | |
16 | :hg:`merge`, :hg:`update`). |
|
16 | :hg:`merge`, :hg:`update`). | |
17 |
|
17 | |||
18 | By default, when several bookmarks point to the same changeset, they |
|
18 | By default, when several bookmarks point to the same changeset, they | |
19 | will all move forward together. It is possible to obtain a more |
|
19 | will all move forward together. It is possible to obtain a more | |
20 | git-like experience by adding the following configuration option to |
|
20 | git-like experience by adding the following configuration option to | |
21 | your configuration file:: |
|
21 | your configuration file:: | |
22 |
|
22 | |||
23 | [bookmarks] |
|
23 | [bookmarks] | |
24 | track.current = True |
|
24 | track.current = True | |
25 |
|
25 | |||
26 | This will cause Mercurial to track the bookmark that you are currently |
|
26 | This will cause Mercurial to track the bookmark that you are currently | |
27 | using, and only update it. This is similar to git's approach to |
|
27 | using, and only update it. This is similar to git's approach to | |
28 | branching. |
|
28 | branching. | |
29 | ''' |
|
29 | ''' | |
30 |
|
30 | |||
31 | from mercurial.i18n import _ |
|
31 | from mercurial.i18n import _ | |
32 | from mercurial.node import nullid, nullrev, bin, hex, short |
|
32 | from mercurial.node import nullid, nullrev, bin, hex, short | |
33 | from mercurial import util, commands, repair, extensions, pushkey, hg, url |
|
33 | from mercurial import util, commands, repair, extensions, pushkey, hg, url | |
34 | from mercurial import revset, encoding |
|
34 | from mercurial import revset, encoding | |
35 | import os |
|
35 | import os | |
36 |
|
36 | |||
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    # Back up the current on-disk bookmarks to undo.bookmarks so
    # rollback() can restore them.  A missing bookmarks file raises
    # IOError and simply means there is nothing to back up.
    try:
        bms = repo.opener('bookmarks').read()
    except IOError:
        bms = None
    if bms is not None:
        repo.opener('undo.bookmarks', 'w').write(bms)

    # Drop the "current bookmark" marker if it no longer names a known
    # bookmark (e.g. after a delete or rename).
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # atomictemp makes the on-disk rewrite all-or-nothing
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            # one "<hex-node> <name>" line per bookmark (localtags format)
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.rename()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()
66 |
|
72 | |||
def setcurrent(repo, mark):
    '''Remember which bookmark the working directory is on.

    After an update to a bookmark (hg update <bookmark>) the chosen
    name is persisted in .hg/bookmarks.current so that later commits
    can advance that bookmark.
    '''
    if repo._bookmarkcurrent == mark:
        return

    marks = repo._bookmarks

    # Updating to a revision that merely equals the current bookmark's
    # target is not a bookmark switch - leave the current marker alone.
    if (mark and mark not in marks and
        repo._bookmarkcurrent and
        marks[repo._bookmarkcurrent] == repo.changectx('.').node()):
        return
    # An unknown name clears the marker (empty file == no current mark).
    if mark not in marks:
        mark = ''
    wlock = repo.wlock()
    try:
        f = repo.opener('bookmarks.current', 'w', atomictemp=True)
        f.write(mark)
        f.rename()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
93 |
|
99 | |||
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires the bookmark extension to be
    enabled for both the local and remote repositories.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()

    # --rename OLD: move an existing bookmark to the (required) new name.
    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        # keep the "current" marker following the rename
        if repo._bookmarkcurrent == rename:
            setcurrent(repo, mark)
        write(repo)
        return

    # --delete NAME: remove the bookmark, clearing the current marker
    # first if it points at the deleted name.
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == repo._bookmarkcurrent:
            setcurrent(repo, None)
        del marks[mark]
        write(repo)
        return

    # NAME [-r REV]: create or (with --force) move a bookmark.
    if mark is not None:
        # validate the name: no newlines (one bookmark per line on disk),
        # not all-whitespace, not clashing with an existing branch name
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        # point the bookmark at REV, or at the working directory parent
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo)
        return

    # no arguments: list all bookmarks, flagging the active one with '*'
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                if ui.configbool('bookmarks', 'track.current'):
                    # git-like mode: only the tracked bookmark counts as
                    # current, and only while it targets '.'
                    current = repo._bookmarkcurrent
                    if bmark == current and n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''
                else:
                    # default mode: every bookmark on '.' is current
                    if n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
        return
187 |
|
193 | |||
def _revstostrip(changelog, node):
    """Return the list of revisions removed by stripping *node*.

    Walks forward from node's revision; every revision with a parent
    already in the strip set joins it.  For merges, parents that lie
    outside the set but after the base revision are treated as heads to
    save and are excluded from the result.
    """
    base = changelog.rev(node)
    stripped = [base]
    keepheads = []
    for rev in xrange(base, len(changelog)):
        p1, p2 = changelog.parentrevs(rev)
        if p1 in stripped or p2 in stripped:
            stripped.append(rev)
            if p2 != nullrev:
                for parent in (p1, p2):
                    if parent not in stripped and parent > base:
                        keepheads.append(parent)
    return [rev for rev in stripped if rev not in keepheads]
201 |
|
207 | |||
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    # Use a set: _revstostrip returns a list and we test membership once
    # per bookmark below; set lookup is O(1) instead of O(n).
    revisions = set(_revstostrip(repo.changelog, node))
    marks = repo._bookmarks
    # remember which bookmarks point into the stripped region ...
    update = []
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    # ... and re-point them at the new working directory parent
    if update:
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo)
217 |
|
223 | |||
def reposetup(ui, repo):
    # Only local repositories carry bookmarks; leave remote peers alone.
    if not repo.local():
        return

    class bookmark_repo(repo.__class__):

        @util.propertycache
        def _bookmarks(self):
            '''Parse .hg/bookmarks file and return a dictionary

            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
            in the .hg/bookmarks file.
            Read the file and return a (name=>nodeid) dictionary
            '''
            try:
                bookmarks = {}
                for line in self.opener('bookmarks'):
                    sha, refspec = line.strip().split(' ', 1)
                    refspec = encoding.tolocal(refspec)
                    bookmarks[refspec] = self.changelog.lookup(sha)
            except:
                # Best-effort: a missing or unparsable bookmarks file
                # yields whatever was read so far (usually {}).
                # NOTE(review): the bare except also swallows unexpected
                # errors - consider narrowing to IOError/ValueError.
                pass
            return bookmarks

        @util.propertycache
        def _bookmarkcurrent(self):
            '''Get the current bookmark

            If we use gittishsh branches we have a current bookmark that
            we are on. This function returns the name of the bookmark. It
            is stored in .hg/bookmarks.current
            '''
            mark = None
            if os.path.exists(self.join('bookmarks.current')):
                file = self.opener('bookmarks.current')
                # No readline() in posixfile_nt, reading everything is cheap
                mark = (file.readlines() or [''])[0]
                if mark == '':
                    # an empty file means "no current bookmark"
                    mark = None
                file.close()
            return mark

        def rollback(self, *args):
            # Restore the pre-transaction bookmarks saved by write()
            # before delegating the actual rollback.
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback(*args)

        def lookup(self, key):
            # Let bookmark names act as revision identifiers everywhere.
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def _bookmarksupdate(self, parents, node):
            # Advance bookmarks that sit on any of *parents* to *node*,
            # persisting only if something actually moved.
            marks = self._bookmarks
            update = False
            if ui.configbool('bookmarks', 'track.current'):
                # git-like mode: only the current bookmark follows commits
                mark = self._bookmarkcurrent
                if mark and marks[mark] in parents:
                    marks[mark] = node
                    update = True
            else:
                # default mode: every bookmark on a parent moves forward
                for mark, n in marks.items():
                    if n in parents:
                        marks[mark] = node
                        update = True
            if update:
                write(self)

        def commitctx(self, ctx, error=False):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commitctx(ctx, error)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                # for non-merges only the first parent matters
                if parents[1] == nullid:
                    parents = (parents[0],)

                self._bookmarksupdate(parents, node)
                return node
            finally:
                wlock.release()

        def pull(self, remote, heads=None, force=False):
            result = super(bookmark_repo, self).pull(remote, heads, force)

            # fast-forward local bookmarks to match the remote where the
            # remote's target is a descendant of ours
            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            changed = False
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], self._bookmarks[k]
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        if cl.rev() >= cr.rev():
                            # local is already at or ahead of remote
                            continue
                        if cr in cl.descendants():
                            self._bookmarks[k] = cr.node()
                            changed = True
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            # not a fast-forward: refuse to move
                            self.ui.warn(_("not updating divergent"
                                           " bookmark %s\n") % k)
            if changed:
                # NOTE(review): sibling methods pass self to write();
                # this relies on the closed-over `repo` being this same
                # instance (repo.__class__ is rebound below) - confirm.
                write(repo)

            return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            result = super(bookmark_repo, self).push(remote, force, revs,
                                                     newbranch)

            # fast-forward remote bookmarks via pushkey where our target
            # descends from the remote's
            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], hex(self._bookmarks[k])
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        if cl in cr.descendants():
                            r = remote.pushkey('bookmarks', k, nr, nl)
                            if r:
                                self.ui.status(_("updating bookmark %s\n") % k)
                            else:
                                self.ui.warn(_('updating bookmark %s'
                                               ' failed!\n') % k)

            return result

        def addchangegroup(self, *args, **kwargs):
            result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
            parents = self.dirstate.parents()
            self._bookmarksupdate(parents, node)
            return result

        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(self._bookmarks)
            return (tags, tagtypes)

        # older repo classes may lack invalidate(); only override it
        # when the base class provides one
        if hasattr(repo, 'invalidate'):
            def invalidate(self):
                super(bookmark_repo, self).invalidate()
                # drop the propertycache entries so they are re-read
                for attr in ('_bookmarks', '_bookmarkcurrent'):
                    if attr in self.__dict__:
                        delattr(self, attr)

    repo.__class__ = bookmark_repo
375 |
|
381 | |||
def listbookmarks(repo):
    """Return the repo's bookmarks as a {name: hex-node} dict (pushkey)."""
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    if not hasattr(repo, '_bookmarks'):
        return {}

    return dict((name, hex(node))
                for name, node in repo._bookmarks.iteritems())
386 |
|
392 | |||
def pushbookmark(repo, key, old, new):
    """pushkey handler: create, move or delete bookmark *key*.

    Returns True on success; False when *old* does not match our view of
    the bookmark or when *new* names an unknown changeset.
    """
    wlock = repo.wlock()
    try:
        marks = repo._bookmarks
        # refuse when the caller's idea of the bookmark is stale
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            # an empty new value requests deletion
            del marks[key]
        elif new in repo:
            marks[key] = repo[new].node()
        else:
            return False
        write(repo)
        return True
    finally:
        wlock.release()
403 |
|
409 | |||
def pull(oldpull, ui, repo, source="default", **opts):
    '''Wrapper for commands.pull adding -B/--bookmark support.'''
    # translate bookmark args to rev args for actual pull
    if opts.get('bookmark'):
        # this is an unpleasant hack as pull will do this internally
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        rb = other.listkeys('bookmarks')

        for b in opts['bookmark']:
            if b not in rb:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            # pulling the bookmark name as a rev brings its changeset in
            opts.setdefault('rev', []).append(b)

    result = oldpull(ui, repo, source, **opts)

    # update specified bookmarks
    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            repo._bookmarks[b] = repo[rb[b]].node()
        write(repo)

    return result
429 |
|
435 | |||
430 | def push(oldpush, ui, repo, dest=None, **opts): |
|
436 | def push(oldpush, ui, repo, dest=None, **opts): | |
431 | dopush = True |
|
437 | dopush = True | |
432 | if opts.get('bookmark'): |
|
438 | if opts.get('bookmark'): | |
433 | dopush = False |
|
439 | dopush = False | |
434 | for b in opts['bookmark']: |
|
440 | for b in opts['bookmark']: | |
435 | if b in repo._bookmarks: |
|
441 | if b in repo._bookmarks: | |
436 | dopush = True |
|
442 | dopush = True | |
437 | opts.setdefault('rev', []).append(b) |
|
443 | opts.setdefault('rev', []).append(b) | |
438 |
|
444 | |||
439 | result = 0 |
|
445 | result = 0 | |
440 | if dopush: |
|
446 | if dopush: | |
441 | result = oldpush(ui, repo, dest, **opts) |
|
447 | result = oldpush(ui, repo, dest, **opts) | |
442 |
|
448 | |||
443 | if opts.get('bookmark'): |
|
449 | if opts.get('bookmark'): | |
444 | # this is an unpleasant hack as push will do this internally |
|
450 | # this is an unpleasant hack as push will do this internally | |
445 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
451 | dest = ui.expandpath(dest or 'default-push', dest or 'default') | |
446 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
452 | dest, branches = hg.parseurl(dest, opts.get('branch')) | |
447 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
453 | other = hg.repository(hg.remoteui(repo, opts), dest) | |
448 | rb = other.listkeys('bookmarks') |
|
454 | rb = other.listkeys('bookmarks') | |
449 | for b in opts['bookmark']: |
|
455 | for b in opts['bookmark']: | |
450 | # explicit push overrides remote bookmark if any |
|
456 | # explicit push overrides remote bookmark if any | |
451 | if b in repo._bookmarks: |
|
457 | if b in repo._bookmarks: | |
452 | ui.status(_("exporting bookmark %s\n") % b) |
|
458 | ui.status(_("exporting bookmark %s\n") % b) | |
453 | new = repo[b].hex() |
|
459 | new = repo[b].hex() | |
454 | elif b in rb: |
|
460 | elif b in rb: | |
455 | ui.status(_("deleting remote bookmark %s\n") % b) |
|
461 | ui.status(_("deleting remote bookmark %s\n") % b) | |
456 | new = '' # delete |
|
462 | new = '' # delete | |
457 | else: |
|
463 | else: | |
458 | ui.warn(_('bookmark %s does not exist on the local ' |
|
464 | ui.warn(_('bookmark %s does not exist on the local ' | |
459 | 'or remote repository!\n') % b) |
|
465 | 'or remote repository!\n') % b) | |
460 | return 2 |
|
466 | return 2 | |
461 | old = rb.get(b, '') |
|
467 | old = rb.get(b, '') | |
462 | r = other.pushkey('bookmarks', b, old, new) |
|
468 | r = other.pushkey('bookmarks', b, old, new) | |
463 | if not r: |
|
469 | if not r: | |
464 | ui.warn(_('updating bookmark %s failed!\n') % b) |
|
470 | ui.warn(_('updating bookmark %s failed!\n') % b) | |
465 | if not result: |
|
471 | if not result: | |
466 | result = 2 |
|
472 | result = 2 | |
467 |
|
473 | |||
468 | return result |
|
474 | return result | |
469 |
|
475 | |||
470 | def diffbookmarks(ui, repo, remote): |
|
476 | def diffbookmarks(ui, repo, remote): | |
471 | ui.status(_("searching for changed bookmarks\n")) |
|
477 | ui.status(_("searching for changed bookmarks\n")) | |
472 |
|
478 | |||
473 | lmarks = repo.listkeys('bookmarks') |
|
479 | lmarks = repo.listkeys('bookmarks') | |
474 | rmarks = remote.listkeys('bookmarks') |
|
480 | rmarks = remote.listkeys('bookmarks') | |
475 |
|
481 | |||
476 | diff = sorted(set(rmarks) - set(lmarks)) |
|
482 | diff = sorted(set(rmarks) - set(lmarks)) | |
477 | for k in diff: |
|
483 | for k in diff: | |
478 | ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) |
|
484 | ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) | |
479 |
|
485 | |||
480 | if len(diff) <= 0: |
|
486 | if len(diff) <= 0: | |
481 | ui.status(_("no changed bookmarks found\n")) |
|
487 | ui.status(_("no changed bookmarks found\n")) | |
482 | return 1 |
|
488 | return 1 | |
483 | return 0 |
|
489 | return 0 | |
484 |
|
490 | |||
485 | def incoming(oldincoming, ui, repo, source="default", **opts): |
|
491 | def incoming(oldincoming, ui, repo, source="default", **opts): | |
486 | if opts.get('bookmarks'): |
|
492 | if opts.get('bookmarks'): | |
487 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
493 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) | |
488 | other = hg.repository(hg.remoteui(repo, opts), source) |
|
494 | other = hg.repository(hg.remoteui(repo, opts), source) | |
489 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
495 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) | |
490 | return diffbookmarks(ui, repo, other) |
|
496 | return diffbookmarks(ui, repo, other) | |
491 | else: |
|
497 | else: | |
492 | return oldincoming(ui, repo, source, **opts) |
|
498 | return oldincoming(ui, repo, source, **opts) | |
493 |
|
499 | |||
494 | def outgoing(oldoutgoing, ui, repo, dest=None, **opts): |
|
500 | def outgoing(oldoutgoing, ui, repo, dest=None, **opts): | |
495 | if opts.get('bookmarks'): |
|
501 | if opts.get('bookmarks'): | |
496 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
502 | dest = ui.expandpath(dest or 'default-push', dest or 'default') | |
497 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
503 | dest, branches = hg.parseurl(dest, opts.get('branch')) | |
498 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
504 | other = hg.repository(hg.remoteui(repo, opts), dest) | |
499 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
505 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) | |
500 | return diffbookmarks(ui, other, repo) |
|
506 | return diffbookmarks(ui, other, repo) | |
501 | else: |
|
507 | else: | |
502 | return oldoutgoing(ui, repo, dest, **opts) |
|
508 | return oldoutgoing(ui, repo, dest, **opts) | |
503 |
|
509 | |||
504 | def uisetup(ui): |
|
510 | def uisetup(ui): | |
505 | extensions.wrapfunction(repair, "strip", strip) |
|
511 | extensions.wrapfunction(repair, "strip", strip) | |
506 | if ui.configbool('bookmarks', 'track.current'): |
|
512 | if ui.configbool('bookmarks', 'track.current'): | |
507 | extensions.wrapcommand(commands.table, 'update', updatecurbookmark) |
|
513 | extensions.wrapcommand(commands.table, 'update', updatecurbookmark) | |
508 |
|
514 | |||
509 | entry = extensions.wrapcommand(commands.table, 'pull', pull) |
|
515 | entry = extensions.wrapcommand(commands.table, 'pull', pull) | |
510 | entry[1].append(('B', 'bookmark', [], |
|
516 | entry[1].append(('B', 'bookmark', [], | |
511 | _("bookmark to import"), |
|
517 | _("bookmark to import"), | |
512 | _('BOOKMARK'))) |
|
518 | _('BOOKMARK'))) | |
513 | entry = extensions.wrapcommand(commands.table, 'push', push) |
|
519 | entry = extensions.wrapcommand(commands.table, 'push', push) | |
514 | entry[1].append(('B', 'bookmark', [], |
|
520 | entry[1].append(('B', 'bookmark', [], | |
515 | _("bookmark to export"), |
|
521 | _("bookmark to export"), | |
516 | _('BOOKMARK'))) |
|
522 | _('BOOKMARK'))) | |
517 | entry = extensions.wrapcommand(commands.table, 'incoming', incoming) |
|
523 | entry = extensions.wrapcommand(commands.table, 'incoming', incoming) | |
518 | entry[1].append(('B', 'bookmarks', False, |
|
524 | entry[1].append(('B', 'bookmarks', False, | |
519 | _("compare bookmark"))) |
|
525 | _("compare bookmark"))) | |
520 | entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing) |
|
526 | entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing) | |
521 | entry[1].append(('B', 'bookmarks', False, |
|
527 | entry[1].append(('B', 'bookmarks', False, | |
522 | _("compare bookmark"))) |
|
528 | _("compare bookmark"))) | |
523 |
|
529 | |||
524 | pushkey.register('bookmarks', pushbookmark, listbookmarks) |
|
530 | pushkey.register('bookmarks', pushbookmark, listbookmarks) | |
525 |
|
531 | |||
526 | def updatecurbookmark(orig, ui, repo, *args, **opts): |
|
532 | def updatecurbookmark(orig, ui, repo, *args, **opts): | |
527 | '''Set the current bookmark |
|
533 | '''Set the current bookmark | |
528 |
|
534 | |||
529 | If the user updates to a bookmark we update the .hg/bookmarks.current |
|
535 | If the user updates to a bookmark we update the .hg/bookmarks.current | |
530 | file. |
|
536 | file. | |
531 | ''' |
|
537 | ''' | |
532 | res = orig(ui, repo, *args, **opts) |
|
538 | res = orig(ui, repo, *args, **opts) | |
533 | rev = opts['rev'] |
|
539 | rev = opts['rev'] | |
534 | if not rev and len(args) > 0: |
|
540 | if not rev and len(args) > 0: | |
535 | rev = args[0] |
|
541 | rev = args[0] | |
536 | setcurrent(repo, rev) |
|
542 | setcurrent(repo, rev) | |
537 | return res |
|
543 | return res | |
538 |
|
544 | |||
539 | def bmrevset(repo, subset, x): |
|
545 | def bmrevset(repo, subset, x): | |
540 | """``bookmark([name])`` |
|
546 | """``bookmark([name])`` | |
541 | The named bookmark or all bookmarks. |
|
547 | The named bookmark or all bookmarks. | |
542 | """ |
|
548 | """ | |
543 | # i18n: "bookmark" is a keyword |
|
549 | # i18n: "bookmark" is a keyword | |
544 | args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments')) |
|
550 | args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments')) | |
545 | if args: |
|
551 | if args: | |
546 | bm = revset.getstring(args[0], |
|
552 | bm = revset.getstring(args[0], | |
547 | # i18n: "bookmark" is a keyword |
|
553 | # i18n: "bookmark" is a keyword | |
548 | _('the argument to bookmark must be a string')) |
|
554 | _('the argument to bookmark must be a string')) | |
549 | bmrev = listbookmarks(repo).get(bm, None) |
|
555 | bmrev = listbookmarks(repo).get(bm, None) | |
550 | if bmrev: |
|
556 | if bmrev: | |
551 | bmrev = repo.changelog.rev(bin(bmrev)) |
|
557 | bmrev = repo.changelog.rev(bin(bmrev)) | |
552 | return [r for r in subset if r == bmrev] |
|
558 | return [r for r in subset if r == bmrev] | |
553 | bms = set([repo.changelog.rev(bin(r)) for r in listbookmarks(repo).values()]) |
|
559 | bms = set([repo.changelog.rev(bin(r)) for r in listbookmarks(repo).values()]) | |
554 | return [r for r in subset if r in bms] |
|
560 | return [r for r in subset if r in bms] | |
555 |
|
561 | |||
556 | def extsetup(ui): |
|
562 | def extsetup(ui): | |
557 | revset.symbols['bookmark'] = bmrevset |
|
563 | revset.symbols['bookmark'] = bmrevset | |
558 |
|
564 | |||
559 | cmdtable = { |
|
565 | cmdtable = { | |
560 | "bookmarks": |
|
566 | "bookmarks": | |
561 | (bookmark, |
|
567 | (bookmark, | |
562 | [('f', 'force', False, _('force')), |
|
568 | [('f', 'force', False, _('force')), | |
563 | ('r', 'rev', '', _('revision'), _('REV')), |
|
569 | ('r', 'rev', '', _('revision'), _('REV')), | |
564 | ('d', 'delete', False, _('delete a given bookmark')), |
|
570 | ('d', 'delete', False, _('delete a given bookmark')), | |
565 | ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))], |
|
571 | ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))], | |
566 | _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')), |
|
572 | _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')), | |
567 | } |
|
573 | } | |
568 |
|
574 | |||
569 | colortable = {'bookmarks.current': 'green'} |
|
575 | colortable = {'bookmarks.current': 'green'} | |
570 |
|
576 | |||
571 | # tell hggettext to extract docstrings from these functions: |
|
577 | # tell hggettext to extract docstrings from these functions: | |
572 | i18nfunctions = [bmrevset] |
|
578 | i18nfunctions = [bmrevset] |
@@ -1,272 +1,274 b'' | |||||
1 | # archival.py - revision archival for mercurial |
|
1 | # archival.py - revision archival for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from i18n import _ |
|
8 | from i18n import _ | |
9 | from node import hex |
|
9 | from node import hex | |
10 | import cmdutil |
|
10 | import cmdutil | |
11 | import util, encoding |
|
11 | import util, encoding | |
12 | import cStringIO, os, stat, tarfile, time, zipfile |
|
12 | import cStringIO, os, stat, tarfile, time, zipfile | |
13 | import zlib, gzip |
|
13 | import zlib, gzip | |
14 |
|
14 | |||
15 | def tidyprefix(dest, kind, prefix): |
|
15 | def tidyprefix(dest, kind, prefix): | |
16 | '''choose prefix to use for names in archive. make sure prefix is |
|
16 | '''choose prefix to use for names in archive. make sure prefix is | |
17 | safe for consumers.''' |
|
17 | safe for consumers.''' | |
18 |
|
18 | |||
19 | if prefix: |
|
19 | if prefix: | |
20 | prefix = util.normpath(prefix) |
|
20 | prefix = util.normpath(prefix) | |
21 | else: |
|
21 | else: | |
22 | if not isinstance(dest, str): |
|
22 | if not isinstance(dest, str): | |
23 | raise ValueError('dest must be string if no prefix') |
|
23 | raise ValueError('dest must be string if no prefix') | |
24 | prefix = os.path.basename(dest) |
|
24 | prefix = os.path.basename(dest) | |
25 | lower = prefix.lower() |
|
25 | lower = prefix.lower() | |
26 | for sfx in exts.get(kind, []): |
|
26 | for sfx in exts.get(kind, []): | |
27 | if lower.endswith(sfx): |
|
27 | if lower.endswith(sfx): | |
28 | prefix = prefix[:-len(sfx)] |
|
28 | prefix = prefix[:-len(sfx)] | |
29 | break |
|
29 | break | |
30 | lpfx = os.path.normpath(util.localpath(prefix)) |
|
30 | lpfx = os.path.normpath(util.localpath(prefix)) | |
31 | prefix = util.pconvert(lpfx) |
|
31 | prefix = util.pconvert(lpfx) | |
32 | if not prefix.endswith('/'): |
|
32 | if not prefix.endswith('/'): | |
33 | prefix += '/' |
|
33 | prefix += '/' | |
34 | if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: |
|
34 | if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: | |
35 | raise util.Abort(_('archive prefix contains illegal components')) |
|
35 | raise util.Abort(_('archive prefix contains illegal components')) | |
36 | return prefix |
|
36 | return prefix | |
37 |
|
37 | |||
38 | exts = { |
|
38 | exts = { | |
39 | 'tar': ['.tar'], |
|
39 | 'tar': ['.tar'], | |
40 | 'tbz2': ['.tbz2', '.tar.bz2'], |
|
40 | 'tbz2': ['.tbz2', '.tar.bz2'], | |
41 | 'tgz': ['.tgz', '.tar.gz'], |
|
41 | 'tgz': ['.tgz', '.tar.gz'], | |
42 | 'zip': ['.zip'], |
|
42 | 'zip': ['.zip'], | |
43 | } |
|
43 | } | |
44 |
|
44 | |||
45 | def guesskind(dest): |
|
45 | def guesskind(dest): | |
46 | for kind, extensions in exts.iteritems(): |
|
46 | for kind, extensions in exts.iteritems(): | |
47 | if util.any(dest.endswith(ext) for ext in extensions): |
|
47 | if util.any(dest.endswith(ext) for ext in extensions): | |
48 | return kind |
|
48 | return kind | |
49 | return None |
|
49 | return None | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | class tarit(object): |
|
52 | class tarit(object): | |
53 | '''write archive to tar file or stream. can write uncompressed, |
|
53 | '''write archive to tar file or stream. can write uncompressed, | |
54 | or compress with gzip or bzip2.''' |
|
54 | or compress with gzip or bzip2.''' | |
55 |
|
55 | |||
56 | class GzipFileWithTime(gzip.GzipFile): |
|
56 | class GzipFileWithTime(gzip.GzipFile): | |
57 |
|
57 | |||
58 | def __init__(self, *args, **kw): |
|
58 | def __init__(self, *args, **kw): | |
59 | timestamp = None |
|
59 | timestamp = None | |
60 | if 'timestamp' in kw: |
|
60 | if 'timestamp' in kw: | |
61 | timestamp = kw.pop('timestamp') |
|
61 | timestamp = kw.pop('timestamp') | |
62 | if timestamp is None: |
|
62 | if timestamp is None: | |
63 | self.timestamp = time.time() |
|
63 | self.timestamp = time.time() | |
64 | else: |
|
64 | else: | |
65 | self.timestamp = timestamp |
|
65 | self.timestamp = timestamp | |
66 | gzip.GzipFile.__init__(self, *args, **kw) |
|
66 | gzip.GzipFile.__init__(self, *args, **kw) | |
67 |
|
67 | |||
68 | def _write_gzip_header(self): |
|
68 | def _write_gzip_header(self): | |
69 | self.fileobj.write('\037\213') # magic header |
|
69 | self.fileobj.write('\037\213') # magic header | |
70 | self.fileobj.write('\010') # compression method |
|
70 | self.fileobj.write('\010') # compression method | |
71 | # Python 2.6 deprecates self.filename |
|
71 | # Python 2.6 deprecates self.filename | |
72 | fname = getattr(self, 'name', None) or self.filename |
|
72 | fname = getattr(self, 'name', None) or self.filename | |
|
73 | if fname and fname.endswith('.gz'): | |||
|
74 | fname = fname[:-3] | |||
73 | flags = 0 |
|
75 | flags = 0 | |
74 | if fname: |
|
76 | if fname: | |
75 | flags = gzip.FNAME |
|
77 | flags = gzip.FNAME | |
76 | self.fileobj.write(chr(flags)) |
|
78 | self.fileobj.write(chr(flags)) | |
77 | gzip.write32u(self.fileobj, long(self.timestamp)) |
|
79 | gzip.write32u(self.fileobj, long(self.timestamp)) | |
78 | self.fileobj.write('\002') |
|
80 | self.fileobj.write('\002') | |
79 | self.fileobj.write('\377') |
|
81 | self.fileobj.write('\377') | |
80 | if fname: |
|
82 | if fname: | |
81 | self.fileobj.write(fname + '\000') |
|
83 | self.fileobj.write(fname + '\000') | |
82 |
|
84 | |||
83 | def __init__(self, dest, mtime, kind=''): |
|
85 | def __init__(self, dest, mtime, kind=''): | |
84 | self.mtime = mtime |
|
86 | self.mtime = mtime | |
85 |
|
87 | |||
86 | def taropen(name, mode, fileobj=None): |
|
88 | def taropen(name, mode, fileobj=None): | |
87 | if kind == 'gz': |
|
89 | if kind == 'gz': | |
88 | mode = mode[0] |
|
90 | mode = mode[0] | |
89 | if not fileobj: |
|
91 | if not fileobj: | |
90 | fileobj = open(name, mode + 'b') |
|
92 | fileobj = open(name, mode + 'b') | |
91 | gzfileobj = self.GzipFileWithTime(name, mode + 'b', |
|
93 | gzfileobj = self.GzipFileWithTime(name, mode + 'b', | |
92 | zlib.Z_BEST_COMPRESSION, |
|
94 | zlib.Z_BEST_COMPRESSION, | |
93 | fileobj, timestamp=mtime) |
|
95 | fileobj, timestamp=mtime) | |
94 | return tarfile.TarFile.taropen(name, mode, gzfileobj) |
|
96 | return tarfile.TarFile.taropen(name, mode, gzfileobj) | |
95 | else: |
|
97 | else: | |
96 | return tarfile.open(name, mode + kind, fileobj) |
|
98 | return tarfile.open(name, mode + kind, fileobj) | |
97 |
|
99 | |||
98 | if isinstance(dest, str): |
|
100 | if isinstance(dest, str): | |
99 | self.z = taropen(dest, mode='w:') |
|
101 | self.z = taropen(dest, mode='w:') | |
100 | else: |
|
102 | else: | |
101 | # Python 2.5-2.5.1 have a regression that requires a name arg |
|
103 | # Python 2.5-2.5.1 have a regression that requires a name arg | |
102 | self.z = taropen(name='', mode='w|', fileobj=dest) |
|
104 | self.z = taropen(name='', mode='w|', fileobj=dest) | |
103 |
|
105 | |||
104 | def addfile(self, name, mode, islink, data): |
|
106 | def addfile(self, name, mode, islink, data): | |
105 | i = tarfile.TarInfo(name) |
|
107 | i = tarfile.TarInfo(name) | |
106 | i.mtime = self.mtime |
|
108 | i.mtime = self.mtime | |
107 | i.size = len(data) |
|
109 | i.size = len(data) | |
108 | if islink: |
|
110 | if islink: | |
109 | i.type = tarfile.SYMTYPE |
|
111 | i.type = tarfile.SYMTYPE | |
110 | i.mode = 0777 |
|
112 | i.mode = 0777 | |
111 | i.linkname = data |
|
113 | i.linkname = data | |
112 | data = None |
|
114 | data = None | |
113 | i.size = 0 |
|
115 | i.size = 0 | |
114 | else: |
|
116 | else: | |
115 | i.mode = mode |
|
117 | i.mode = mode | |
116 | data = cStringIO.StringIO(data) |
|
118 | data = cStringIO.StringIO(data) | |
117 | self.z.addfile(i, data) |
|
119 | self.z.addfile(i, data) | |
118 |
|
120 | |||
119 | def done(self): |
|
121 | def done(self): | |
120 | self.z.close() |
|
122 | self.z.close() | |
121 |
|
123 | |||
122 | class tellable(object): |
|
124 | class tellable(object): | |
123 | '''provide tell method for zipfile.ZipFile when writing to http |
|
125 | '''provide tell method for zipfile.ZipFile when writing to http | |
124 | response file object.''' |
|
126 | response file object.''' | |
125 |
|
127 | |||
126 | def __init__(self, fp): |
|
128 | def __init__(self, fp): | |
127 | self.fp = fp |
|
129 | self.fp = fp | |
128 | self.offset = 0 |
|
130 | self.offset = 0 | |
129 |
|
131 | |||
130 | def __getattr__(self, key): |
|
132 | def __getattr__(self, key): | |
131 | return getattr(self.fp, key) |
|
133 | return getattr(self.fp, key) | |
132 |
|
134 | |||
133 | def write(self, s): |
|
135 | def write(self, s): | |
134 | self.fp.write(s) |
|
136 | self.fp.write(s) | |
135 | self.offset += len(s) |
|
137 | self.offset += len(s) | |
136 |
|
138 | |||
137 | def tell(self): |
|
139 | def tell(self): | |
138 | return self.offset |
|
140 | return self.offset | |
139 |
|
141 | |||
140 | class zipit(object): |
|
142 | class zipit(object): | |
141 | '''write archive to zip file or stream. can write uncompressed, |
|
143 | '''write archive to zip file or stream. can write uncompressed, | |
142 | or compressed with deflate.''' |
|
144 | or compressed with deflate.''' | |
143 |
|
145 | |||
144 | def __init__(self, dest, mtime, compress=True): |
|
146 | def __init__(self, dest, mtime, compress=True): | |
145 | if not isinstance(dest, str): |
|
147 | if not isinstance(dest, str): | |
146 | try: |
|
148 | try: | |
147 | dest.tell() |
|
149 | dest.tell() | |
148 | except (AttributeError, IOError): |
|
150 | except (AttributeError, IOError): | |
149 | dest = tellable(dest) |
|
151 | dest = tellable(dest) | |
150 | self.z = zipfile.ZipFile(dest, 'w', |
|
152 | self.z = zipfile.ZipFile(dest, 'w', | |
151 | compress and zipfile.ZIP_DEFLATED or |
|
153 | compress and zipfile.ZIP_DEFLATED or | |
152 | zipfile.ZIP_STORED) |
|
154 | zipfile.ZIP_STORED) | |
153 |
|
155 | |||
154 | # Python's zipfile module emits deprecation warnings if we try |
|
156 | # Python's zipfile module emits deprecation warnings if we try | |
155 | # to store files with a date before 1980. |
|
157 | # to store files with a date before 1980. | |
156 | epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) |
|
158 | epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) | |
157 | if mtime < epoch: |
|
159 | if mtime < epoch: | |
158 | mtime = epoch |
|
160 | mtime = epoch | |
159 |
|
161 | |||
160 | self.date_time = time.gmtime(mtime)[:6] |
|
162 | self.date_time = time.gmtime(mtime)[:6] | |
161 |
|
163 | |||
162 | def addfile(self, name, mode, islink, data): |
|
164 | def addfile(self, name, mode, islink, data): | |
163 | i = zipfile.ZipInfo(name, self.date_time) |
|
165 | i = zipfile.ZipInfo(name, self.date_time) | |
164 | i.compress_type = self.z.compression |
|
166 | i.compress_type = self.z.compression | |
165 | # unzip will not honor unix file modes unless file creator is |
|
167 | # unzip will not honor unix file modes unless file creator is | |
166 | # set to unix (id 3). |
|
168 | # set to unix (id 3). | |
167 | i.create_system = 3 |
|
169 | i.create_system = 3 | |
168 | ftype = stat.S_IFREG |
|
170 | ftype = stat.S_IFREG | |
169 | if islink: |
|
171 | if islink: | |
170 | mode = 0777 |
|
172 | mode = 0777 | |
171 | ftype = stat.S_IFLNK |
|
173 | ftype = stat.S_IFLNK | |
172 | i.external_attr = (mode | ftype) << 16L |
|
174 | i.external_attr = (mode | ftype) << 16L | |
173 | self.z.writestr(i, data) |
|
175 | self.z.writestr(i, data) | |
174 |
|
176 | |||
175 | def done(self): |
|
177 | def done(self): | |
176 | self.z.close() |
|
178 | self.z.close() | |
177 |
|
179 | |||
178 | class fileit(object): |
|
180 | class fileit(object): | |
179 | '''write archive as files in directory.''' |
|
181 | '''write archive as files in directory.''' | |
180 |
|
182 | |||
181 | def __init__(self, name, mtime): |
|
183 | def __init__(self, name, mtime): | |
182 | self.basedir = name |
|
184 | self.basedir = name | |
183 | self.opener = util.opener(self.basedir) |
|
185 | self.opener = util.opener(self.basedir) | |
184 |
|
186 | |||
185 | def addfile(self, name, mode, islink, data): |
|
187 | def addfile(self, name, mode, islink, data): | |
186 | if islink: |
|
188 | if islink: | |
187 | self.opener.symlink(data, name) |
|
189 | self.opener.symlink(data, name) | |
188 | return |
|
190 | return | |
189 | f = self.opener(name, "w", atomictemp=True) |
|
191 | f = self.opener(name, "w", atomictemp=True) | |
190 | f.write(data) |
|
192 | f.write(data) | |
191 | f.rename() |
|
193 | f.rename() | |
192 | destfile = os.path.join(self.basedir, name) |
|
194 | destfile = os.path.join(self.basedir, name) | |
193 | os.chmod(destfile, mode) |
|
195 | os.chmod(destfile, mode) | |
194 |
|
196 | |||
195 | def done(self): |
|
197 | def done(self): | |
196 | pass |
|
198 | pass | |
197 |
|
199 | |||
198 | archivers = { |
|
200 | archivers = { | |
199 | 'files': fileit, |
|
201 | 'files': fileit, | |
200 | 'tar': tarit, |
|
202 | 'tar': tarit, | |
201 | 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), |
|
203 | 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), | |
202 | 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), |
|
204 | 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), | |
203 | 'uzip': lambda name, mtime: zipit(name, mtime, False), |
|
205 | 'uzip': lambda name, mtime: zipit(name, mtime, False), | |
204 | 'zip': zipit, |
|
206 | 'zip': zipit, | |
205 | } |
|
207 | } | |
206 |
|
208 | |||
207 | def archive(repo, dest, node, kind, decode=True, matchfn=None, |
|
209 | def archive(repo, dest, node, kind, decode=True, matchfn=None, | |
208 | prefix=None, mtime=None, subrepos=False): |
|
210 | prefix=None, mtime=None, subrepos=False): | |
209 | '''create archive of repo as it was at node. |
|
211 | '''create archive of repo as it was at node. | |
210 |
|
212 | |||
211 | dest can be name of directory, name of archive file, or file |
|
213 | dest can be name of directory, name of archive file, or file | |
212 | object to write archive to. |
|
214 | object to write archive to. | |
213 |
|
215 | |||
214 | kind is type of archive to create. |
|
216 | kind is type of archive to create. | |
215 |
|
217 | |||
216 | decode tells whether to put files through decode filters from |
|
218 | decode tells whether to put files through decode filters from | |
217 | hgrc. |
|
219 | hgrc. | |
218 |
|
220 | |||
219 | matchfn is function to filter names of files to write to archive. |
|
221 | matchfn is function to filter names of files to write to archive. | |
220 |
|
222 | |||
221 | prefix is name of path to put before every archive member.''' |
|
223 | prefix is name of path to put before every archive member.''' | |
222 |
|
224 | |||
223 | if kind == 'files': |
|
225 | if kind == 'files': | |
224 | if prefix: |
|
226 | if prefix: | |
225 | raise util.Abort(_('cannot give prefix when archiving to files')) |
|
227 | raise util.Abort(_('cannot give prefix when archiving to files')) | |
226 | else: |
|
228 | else: | |
227 | prefix = tidyprefix(dest, kind, prefix) |
|
229 | prefix = tidyprefix(dest, kind, prefix) | |
228 |
|
230 | |||
229 | def write(name, mode, islink, getdata): |
|
231 | def write(name, mode, islink, getdata): | |
230 | if matchfn and not matchfn(name): |
|
232 | if matchfn and not matchfn(name): | |
231 | return |
|
233 | return | |
232 | data = getdata() |
|
234 | data = getdata() | |
233 | if decode: |
|
235 | if decode: | |
234 | data = repo.wwritedata(name, data) |
|
236 | data = repo.wwritedata(name, data) | |
235 | archiver.addfile(prefix + name, mode, islink, data) |
|
237 | archiver.addfile(prefix + name, mode, islink, data) | |
236 |
|
238 | |||
237 | if kind not in archivers: |
|
239 | if kind not in archivers: | |
238 | raise util.Abort(_("unknown archive type '%s'") % kind) |
|
240 | raise util.Abort(_("unknown archive type '%s'") % kind) | |
239 |
|
241 | |||
240 | ctx = repo[node] |
|
242 | ctx = repo[node] | |
241 | archiver = archivers[kind](dest, mtime or ctx.date()[0]) |
|
243 | archiver = archivers[kind](dest, mtime or ctx.date()[0]) | |
242 |
|
244 | |||
243 | if repo.ui.configbool("ui", "archivemeta", True): |
|
245 | if repo.ui.configbool("ui", "archivemeta", True): | |
244 | def metadata(): |
|
246 | def metadata(): | |
245 | base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( |
|
247 | base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( | |
246 | repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch())) |
|
248 | repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch())) | |
247 |
|
249 | |||
248 | tags = ''.join('tag: %s\n' % t for t in ctx.tags() |
|
250 | tags = ''.join('tag: %s\n' % t for t in ctx.tags() | |
249 | if repo.tagtype(t) == 'global') |
|
251 | if repo.tagtype(t) == 'global') | |
250 | if not tags: |
|
252 | if not tags: | |
251 | repo.ui.pushbuffer() |
|
253 | repo.ui.pushbuffer() | |
252 | opts = {'template': '{latesttag}\n{latesttagdistance}', |
|
254 | opts = {'template': '{latesttag}\n{latesttagdistance}', | |
253 | 'style': '', 'patch': None, 'git': None} |
|
255 | 'style': '', 'patch': None, 'git': None} | |
254 | cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) |
|
256 | cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) | |
255 | ltags, dist = repo.ui.popbuffer().split('\n') |
|
257 | ltags, dist = repo.ui.popbuffer().split('\n') | |
256 | tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) |
|
258 | tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) | |
257 | tags += 'latesttagdistance: %s\n' % dist |
|
259 | tags += 'latesttagdistance: %s\n' % dist | |
258 |
|
260 | |||
259 | return base + tags |
|
261 | return base + tags | |
260 |
|
262 | |||
261 | write('.hg_archival.txt', 0644, False, metadata) |
|
263 | write('.hg_archival.txt', 0644, False, metadata) | |
262 |
|
264 | |||
263 | for f in ctx: |
|
265 | for f in ctx: | |
264 | ff = ctx.flags(f) |
|
266 | ff = ctx.flags(f) | |
265 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data) |
|
267 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data) | |
266 |
|
268 | |||
267 | if subrepos: |
|
269 | if subrepos: | |
268 | for subpath in ctx.substate: |
|
270 | for subpath in ctx.substate: | |
269 | sub = ctx.sub(subpath) |
|
271 | sub = ctx.sub(subpath) | |
270 | sub.archive(archiver, prefix) |
|
272 | sub.archive(archiver, prefix) | |
271 |
|
273 | |||
272 | archiver.done() |
|
274 | archiver.done() |
@@ -1,1612 +1,1614 b'' | |||||
1 | # patch.py - patch file parsing routines |
|
1 | # patch.py - patch file parsing routines | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Brendan Cully <brendan@kublai.com> |
|
3 | # Copyright 2006 Brendan Cully <brendan@kublai.com> | |
4 | # Copyright 2007 Chris Mason <chris.mason@oracle.com> |
|
4 | # Copyright 2007 Chris Mason <chris.mason@oracle.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | import cStringIO, email.Parser, os, re |
|
9 | import cStringIO, email.Parser, os, re | |
10 | import tempfile, zlib |
|
10 | import tempfile, zlib | |
11 |
|
11 | |||
12 | from i18n import _ |
|
12 | from i18n import _ | |
13 | from node import hex, nullid, short |
|
13 | from node import hex, nullid, short | |
14 | import base85, mdiff, util, diffhelpers, copies, encoding |
|
14 | import base85, mdiff, util, diffhelpers, copies, encoding | |
15 |
|
15 | |||
16 | gitre = re.compile('diff --git a/(.*) b/(.*)') |
|
16 | gitre = re.compile('diff --git a/(.*) b/(.*)') | |
17 |
|
17 | |||
18 | class PatchError(Exception): |
|
18 | class PatchError(Exception): | |
19 | pass |
|
19 | pass | |
20 |
|
20 | |||
21 | # helper functions |
|
21 | # helper functions | |
22 |
|
22 | |||
23 | def copyfile(src, dst, basedir): |
|
23 | def copyfile(src, dst, basedir): | |
24 | abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]] |
|
24 | abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]] | |
25 | if os.path.lexists(absdst): |
|
25 | if os.path.lexists(absdst): | |
26 | raise util.Abort(_("cannot create %s: destination already exists") % |
|
26 | raise util.Abort(_("cannot create %s: destination already exists") % | |
27 | dst) |
|
27 | dst) | |
28 |
|
28 | |||
29 | dstdir = os.path.dirname(absdst) |
|
29 | dstdir = os.path.dirname(absdst) | |
30 | if dstdir and not os.path.isdir(dstdir): |
|
30 | if dstdir and not os.path.isdir(dstdir): | |
31 | try: |
|
31 | try: | |
32 | os.makedirs(dstdir) |
|
32 | os.makedirs(dstdir) | |
33 | except IOError: |
|
33 | except IOError: | |
34 | raise util.Abort( |
|
34 | raise util.Abort( | |
35 | _("cannot create %s: unable to create destination directory") |
|
35 | _("cannot create %s: unable to create destination directory") | |
36 | % dst) |
|
36 | % dst) | |
37 |
|
37 | |||
38 | util.copyfile(abssrc, absdst) |
|
38 | util.copyfile(abssrc, absdst) | |
39 |
|
39 | |||
40 | # public functions |
|
40 | # public functions | |
41 |
|
41 | |||
42 | def split(stream): |
|
42 | def split(stream): | |
43 | '''return an iterator of individual patches from a stream''' |
|
43 | '''return an iterator of individual patches from a stream''' | |
44 | def isheader(line, inheader): |
|
44 | def isheader(line, inheader): | |
45 | if inheader and line[0] in (' ', '\t'): |
|
45 | if inheader and line[0] in (' ', '\t'): | |
46 | # continuation |
|
46 | # continuation | |
47 | return True |
|
47 | return True | |
48 | if line[0] in (' ', '-', '+'): |
|
48 | if line[0] in (' ', '-', '+'): | |
49 | # diff line - don't check for header pattern in there |
|
49 | # diff line - don't check for header pattern in there | |
50 | return False |
|
50 | return False | |
51 | l = line.split(': ', 1) |
|
51 | l = line.split(': ', 1) | |
52 | return len(l) == 2 and ' ' not in l[0] |
|
52 | return len(l) == 2 and ' ' not in l[0] | |
53 |
|
53 | |||
54 | def chunk(lines): |
|
54 | def chunk(lines): | |
55 | return cStringIO.StringIO(''.join(lines)) |
|
55 | return cStringIO.StringIO(''.join(lines)) | |
56 |
|
56 | |||
57 | def hgsplit(stream, cur): |
|
57 | def hgsplit(stream, cur): | |
58 | inheader = True |
|
58 | inheader = True | |
59 |
|
59 | |||
60 | for line in stream: |
|
60 | for line in stream: | |
61 | if not line.strip(): |
|
61 | if not line.strip(): | |
62 | inheader = False |
|
62 | inheader = False | |
63 | if not inheader and line.startswith('# HG changeset patch'): |
|
63 | if not inheader and line.startswith('# HG changeset patch'): | |
64 | yield chunk(cur) |
|
64 | yield chunk(cur) | |
65 | cur = [] |
|
65 | cur = [] | |
66 | inheader = True |
|
66 | inheader = True | |
67 |
|
67 | |||
68 | cur.append(line) |
|
68 | cur.append(line) | |
69 |
|
69 | |||
70 | if cur: |
|
70 | if cur: | |
71 | yield chunk(cur) |
|
71 | yield chunk(cur) | |
72 |
|
72 | |||
73 | def mboxsplit(stream, cur): |
|
73 | def mboxsplit(stream, cur): | |
74 | for line in stream: |
|
74 | for line in stream: | |
75 | if line.startswith('From '): |
|
75 | if line.startswith('From '): | |
76 | for c in split(chunk(cur[1:])): |
|
76 | for c in split(chunk(cur[1:])): | |
77 | yield c |
|
77 | yield c | |
78 | cur = [] |
|
78 | cur = [] | |
79 |
|
79 | |||
80 | cur.append(line) |
|
80 | cur.append(line) | |
81 |
|
81 | |||
82 | if cur: |
|
82 | if cur: | |
83 | for c in split(chunk(cur[1:])): |
|
83 | for c in split(chunk(cur[1:])): | |
84 | yield c |
|
84 | yield c | |
85 |
|
85 | |||
86 | def mimesplit(stream, cur): |
|
86 | def mimesplit(stream, cur): | |
87 | def msgfp(m): |
|
87 | def msgfp(m): | |
88 | fp = cStringIO.StringIO() |
|
88 | fp = cStringIO.StringIO() | |
89 | g = email.Generator.Generator(fp, mangle_from_=False) |
|
89 | g = email.Generator.Generator(fp, mangle_from_=False) | |
90 | g.flatten(m) |
|
90 | g.flatten(m) | |
91 | fp.seek(0) |
|
91 | fp.seek(0) | |
92 | return fp |
|
92 | return fp | |
93 |
|
93 | |||
94 | for line in stream: |
|
94 | for line in stream: | |
95 | cur.append(line) |
|
95 | cur.append(line) | |
96 | c = chunk(cur) |
|
96 | c = chunk(cur) | |
97 |
|
97 | |||
98 | m = email.Parser.Parser().parse(c) |
|
98 | m = email.Parser.Parser().parse(c) | |
99 | if not m.is_multipart(): |
|
99 | if not m.is_multipart(): | |
100 | yield msgfp(m) |
|
100 | yield msgfp(m) | |
101 | else: |
|
101 | else: | |
102 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
102 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') | |
103 | for part in m.walk(): |
|
103 | for part in m.walk(): | |
104 | ct = part.get_content_type() |
|
104 | ct = part.get_content_type() | |
105 | if ct not in ok_types: |
|
105 | if ct not in ok_types: | |
106 | continue |
|
106 | continue | |
107 | yield msgfp(part) |
|
107 | yield msgfp(part) | |
108 |
|
108 | |||
109 | def headersplit(stream, cur): |
|
109 | def headersplit(stream, cur): | |
110 | inheader = False |
|
110 | inheader = False | |
111 |
|
111 | |||
112 | for line in stream: |
|
112 | for line in stream: | |
113 | if not inheader and isheader(line, inheader): |
|
113 | if not inheader and isheader(line, inheader): | |
114 | yield chunk(cur) |
|
114 | yield chunk(cur) | |
115 | cur = [] |
|
115 | cur = [] | |
116 | inheader = True |
|
116 | inheader = True | |
117 | if inheader and not isheader(line, inheader): |
|
117 | if inheader and not isheader(line, inheader): | |
118 | inheader = False |
|
118 | inheader = False | |
119 |
|
119 | |||
120 | cur.append(line) |
|
120 | cur.append(line) | |
121 |
|
121 | |||
122 | if cur: |
|
122 | if cur: | |
123 | yield chunk(cur) |
|
123 | yield chunk(cur) | |
124 |
|
124 | |||
125 | def remainder(cur): |
|
125 | def remainder(cur): | |
126 | yield chunk(cur) |
|
126 | yield chunk(cur) | |
127 |
|
127 | |||
128 | class fiter(object): |
|
128 | class fiter(object): | |
129 | def __init__(self, fp): |
|
129 | def __init__(self, fp): | |
130 | self.fp = fp |
|
130 | self.fp = fp | |
131 |
|
131 | |||
132 | def __iter__(self): |
|
132 | def __iter__(self): | |
133 | return self |
|
133 | return self | |
134 |
|
134 | |||
135 | def next(self): |
|
135 | def next(self): | |
136 | l = self.fp.readline() |
|
136 | l = self.fp.readline() | |
137 | if not l: |
|
137 | if not l: | |
138 | raise StopIteration |
|
138 | raise StopIteration | |
139 | return l |
|
139 | return l | |
140 |
|
140 | |||
141 | inheader = False |
|
141 | inheader = False | |
142 | cur = [] |
|
142 | cur = [] | |
143 |
|
143 | |||
144 | mimeheaders = ['content-type'] |
|
144 | mimeheaders = ['content-type'] | |
145 |
|
145 | |||
146 | if not hasattr(stream, 'next'): |
|
146 | if not hasattr(stream, 'next'): | |
147 | # http responses, for example, have readline but not next |
|
147 | # http responses, for example, have readline but not next | |
148 | stream = fiter(stream) |
|
148 | stream = fiter(stream) | |
149 |
|
149 | |||
150 | for line in stream: |
|
150 | for line in stream: | |
151 | cur.append(line) |
|
151 | cur.append(line) | |
152 | if line.startswith('# HG changeset patch'): |
|
152 | if line.startswith('# HG changeset patch'): | |
153 | return hgsplit(stream, cur) |
|
153 | return hgsplit(stream, cur) | |
154 | elif line.startswith('From '): |
|
154 | elif line.startswith('From '): | |
155 | return mboxsplit(stream, cur) |
|
155 | return mboxsplit(stream, cur) | |
156 | elif isheader(line, inheader): |
|
156 | elif isheader(line, inheader): | |
157 | inheader = True |
|
157 | inheader = True | |
158 | if line.split(':', 1)[0].lower() in mimeheaders: |
|
158 | if line.split(':', 1)[0].lower() in mimeheaders: | |
159 | # let email parser handle this |
|
159 | # let email parser handle this | |
160 | return mimesplit(stream, cur) |
|
160 | return mimesplit(stream, cur) | |
161 | elif line.startswith('--- ') and inheader: |
|
161 | elif line.startswith('--- ') and inheader: | |
162 | # No evil headers seen by diff start, split by hand |
|
162 | # No evil headers seen by diff start, split by hand | |
163 | return headersplit(stream, cur) |
|
163 | return headersplit(stream, cur) | |
164 | # Not enough info, keep reading |
|
164 | # Not enough info, keep reading | |
165 |
|
165 | |||
166 | # if we are here, we have a very plain patch |
|
166 | # if we are here, we have a very plain patch | |
167 | return remainder(cur) |
|
167 | return remainder(cur) | |
168 |
|
168 | |||
169 | def extract(ui, fileobj): |
|
169 | def extract(ui, fileobj): | |
170 | '''extract patch from data read from fileobj. |
|
170 | '''extract patch from data read from fileobj. | |
171 |
|
171 | |||
172 | patch can be a normal patch or contained in an email message. |
|
172 | patch can be a normal patch or contained in an email message. | |
173 |
|
173 | |||
174 | return tuple (filename, message, user, date, branch, node, p1, p2). |
|
174 | return tuple (filename, message, user, date, branch, node, p1, p2). | |
175 | Any item in the returned tuple can be None. If filename is None, |
|
175 | Any item in the returned tuple can be None. If filename is None, | |
176 | fileobj did not contain a patch. Caller must unlink filename when done.''' |
|
176 | fileobj did not contain a patch. Caller must unlink filename when done.''' | |
177 |
|
177 | |||
178 | # attempt to detect the start of a patch |
|
178 | # attempt to detect the start of a patch | |
179 | # (this heuristic is borrowed from quilt) |
|
179 | # (this heuristic is borrowed from quilt) | |
180 | diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |' |
|
180 | diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |' | |
181 | r'retrieving revision [0-9]+(\.[0-9]+)*$|' |
|
181 | r'retrieving revision [0-9]+(\.[0-9]+)*$|' | |
182 | r'---[ \t].*?^\+\+\+[ \t]|' |
|
182 | r'---[ \t].*?^\+\+\+[ \t]|' | |
183 | r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL) |
|
183 | r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL) | |
184 |
|
184 | |||
185 | fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') |
|
185 | fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') | |
186 | tmpfp = os.fdopen(fd, 'w') |
|
186 | tmpfp = os.fdopen(fd, 'w') | |
187 | try: |
|
187 | try: | |
188 | msg = email.Parser.Parser().parse(fileobj) |
|
188 | msg = email.Parser.Parser().parse(fileobj) | |
189 |
|
189 | |||
190 | subject = msg['Subject'] |
|
190 | subject = msg['Subject'] | |
191 | user = msg['From'] |
|
191 | user = msg['From'] | |
192 | if not subject and not user: |
|
192 | if not subject and not user: | |
193 | # Not an email, restore parsed headers if any |
|
193 | # Not an email, restore parsed headers if any | |
194 | subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n' |
|
194 | subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n' | |
195 |
|
195 | |||
196 | gitsendmail = 'git-send-email' in msg.get('X-Mailer', '') |
|
196 | gitsendmail = 'git-send-email' in msg.get('X-Mailer', '') | |
197 | # should try to parse msg['Date'] |
|
197 | # should try to parse msg['Date'] | |
198 | date = None |
|
198 | date = None | |
199 | nodeid = None |
|
199 | nodeid = None | |
200 | branch = None |
|
200 | branch = None | |
201 | parents = [] |
|
201 | parents = [] | |
202 |
|
202 | |||
203 | if subject: |
|
203 | if subject: | |
204 | if subject.startswith('[PATCH'): |
|
204 | if subject.startswith('[PATCH'): | |
205 | pend = subject.find(']') |
|
205 | pend = subject.find(']') | |
206 | if pend >= 0: |
|
206 | if pend >= 0: | |
207 | subject = subject[pend + 1:].lstrip() |
|
207 | subject = subject[pend + 1:].lstrip() | |
208 | subject = subject.replace('\n\t', ' ') |
|
208 | subject = subject.replace('\n\t', ' ') | |
209 | ui.debug('Subject: %s\n' % subject) |
|
209 | ui.debug('Subject: %s\n' % subject) | |
210 | if user: |
|
210 | if user: | |
211 | ui.debug('From: %s\n' % user) |
|
211 | ui.debug('From: %s\n' % user) | |
212 | diffs_seen = 0 |
|
212 | diffs_seen = 0 | |
213 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
213 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') | |
214 | message = '' |
|
214 | message = '' | |
215 | for part in msg.walk(): |
|
215 | for part in msg.walk(): | |
216 | content_type = part.get_content_type() |
|
216 | content_type = part.get_content_type() | |
217 | ui.debug('Content-Type: %s\n' % content_type) |
|
217 | ui.debug('Content-Type: %s\n' % content_type) | |
218 | if content_type not in ok_types: |
|
218 | if content_type not in ok_types: | |
219 | continue |
|
219 | continue | |
220 | payload = part.get_payload(decode=True) |
|
220 | payload = part.get_payload(decode=True) | |
221 | m = diffre.search(payload) |
|
221 | m = diffre.search(payload) | |
222 | if m: |
|
222 | if m: | |
223 | hgpatch = False |
|
223 | hgpatch = False | |
224 | hgpatchheader = False |
|
224 | hgpatchheader = False | |
225 | ignoretext = False |
|
225 | ignoretext = False | |
226 |
|
226 | |||
227 | ui.debug('found patch at byte %d\n' % m.start(0)) |
|
227 | ui.debug('found patch at byte %d\n' % m.start(0)) | |
228 | diffs_seen += 1 |
|
228 | diffs_seen += 1 | |
229 | cfp = cStringIO.StringIO() |
|
229 | cfp = cStringIO.StringIO() | |
230 | for line in payload[:m.start(0)].splitlines(): |
|
230 | for line in payload[:m.start(0)].splitlines(): | |
231 | if line.startswith('# HG changeset patch') and not hgpatch: |
|
231 | if line.startswith('# HG changeset patch') and not hgpatch: | |
232 | ui.debug('patch generated by hg export\n') |
|
232 | ui.debug('patch generated by hg export\n') | |
233 | hgpatch = True |
|
233 | hgpatch = True | |
234 | hgpatchheader = True |
|
234 | hgpatchheader = True | |
235 | # drop earlier commit message content |
|
235 | # drop earlier commit message content | |
236 | cfp.seek(0) |
|
236 | cfp.seek(0) | |
237 | cfp.truncate() |
|
237 | cfp.truncate() | |
238 | subject = None |
|
238 | subject = None | |
239 | elif hgpatchheader: |
|
239 | elif hgpatchheader: | |
240 | if line.startswith('# User '): |
|
240 | if line.startswith('# User '): | |
241 | user = line[7:] |
|
241 | user = line[7:] | |
242 | ui.debug('From: %s\n' % user) |
|
242 | ui.debug('From: %s\n' % user) | |
243 | elif line.startswith("# Date "): |
|
243 | elif line.startswith("# Date "): | |
244 | date = line[7:] |
|
244 | date = line[7:] | |
245 | elif line.startswith("# Branch "): |
|
245 | elif line.startswith("# Branch "): | |
246 | branch = line[9:] |
|
246 | branch = line[9:] | |
247 | elif line.startswith("# Node ID "): |
|
247 | elif line.startswith("# Node ID "): | |
248 | nodeid = line[10:] |
|
248 | nodeid = line[10:] | |
249 | elif line.startswith("# Parent "): |
|
249 | elif line.startswith("# Parent "): | |
250 | parents.append(line[10:]) |
|
250 | parents.append(line[10:]) | |
251 | elif not line.startswith("# "): |
|
251 | elif not line.startswith("# "): | |
252 | hgpatchheader = False |
|
252 | hgpatchheader = False | |
253 | elif line == '---' and gitsendmail: |
|
253 | elif line == '---' and gitsendmail: | |
254 | ignoretext = True |
|
254 | ignoretext = True | |
255 | if not hgpatchheader and not ignoretext: |
|
255 | if not hgpatchheader and not ignoretext: | |
256 | cfp.write(line) |
|
256 | cfp.write(line) | |
257 | cfp.write('\n') |
|
257 | cfp.write('\n') | |
258 | message = cfp.getvalue() |
|
258 | message = cfp.getvalue() | |
259 | if tmpfp: |
|
259 | if tmpfp: | |
260 | tmpfp.write(payload) |
|
260 | tmpfp.write(payload) | |
261 | if not payload.endswith('\n'): |
|
261 | if not payload.endswith('\n'): | |
262 | tmpfp.write('\n') |
|
262 | tmpfp.write('\n') | |
263 | elif not diffs_seen and message and content_type == 'text/plain': |
|
263 | elif not diffs_seen and message and content_type == 'text/plain': | |
264 | message += '\n' + payload |
|
264 | message += '\n' + payload | |
265 | except: |
|
265 | except: | |
266 | tmpfp.close() |
|
266 | tmpfp.close() | |
267 | os.unlink(tmpname) |
|
267 | os.unlink(tmpname) | |
268 | raise |
|
268 | raise | |
269 |
|
269 | |||
270 | if subject and not message.startswith(subject): |
|
270 | if subject and not message.startswith(subject): | |
271 | message = '%s\n%s' % (subject, message) |
|
271 | message = '%s\n%s' % (subject, message) | |
272 | tmpfp.close() |
|
272 | tmpfp.close() | |
273 | if not diffs_seen: |
|
273 | if not diffs_seen: | |
274 | os.unlink(tmpname) |
|
274 | os.unlink(tmpname) | |
275 | return None, message, user, date, branch, None, None, None |
|
275 | return None, message, user, date, branch, None, None, None | |
276 | p1 = parents and parents.pop(0) or None |
|
276 | p1 = parents and parents.pop(0) or None | |
277 | p2 = parents and parents.pop(0) or None |
|
277 | p2 = parents and parents.pop(0) or None | |
278 | return tmpname, message, user, date, branch, nodeid, p1, p2 |
|
278 | return tmpname, message, user, date, branch, nodeid, p1, p2 | |
279 |
|
279 | |||
280 | class patchmeta(object): |
|
280 | class patchmeta(object): | |
281 | """Patched file metadata |
|
281 | """Patched file metadata | |
282 |
|
282 | |||
283 | 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY |
|
283 | 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY | |
284 | or COPY. 'path' is patched file path. 'oldpath' is set to the |
|
284 | or COPY. 'path' is patched file path. 'oldpath' is set to the | |
285 | origin file when 'op' is either COPY or RENAME, None otherwise. If |
|
285 | origin file when 'op' is either COPY or RENAME, None otherwise. If | |
286 | file mode is changed, 'mode' is a tuple (islink, isexec) where |
|
286 | file mode is changed, 'mode' is a tuple (islink, isexec) where | |
287 | 'islink' is True if the file is a symlink and 'isexec' is True if |
|
287 | 'islink' is True if the file is a symlink and 'isexec' is True if | |
288 | the file is executable. Otherwise, 'mode' is None. |
|
288 | the file is executable. Otherwise, 'mode' is None. | |
289 | """ |
|
289 | """ | |
290 | def __init__(self, path): |
|
290 | def __init__(self, path): | |
291 | self.path = path |
|
291 | self.path = path | |
292 | self.oldpath = None |
|
292 | self.oldpath = None | |
293 | self.mode = None |
|
293 | self.mode = None | |
294 | self.op = 'MODIFY' |
|
294 | self.op = 'MODIFY' | |
295 | self.binary = False |
|
295 | self.binary = False | |
296 |
|
296 | |||
297 | def setmode(self, mode): |
|
297 | def setmode(self, mode): | |
298 | islink = mode & 020000 |
|
298 | islink = mode & 020000 | |
299 | isexec = mode & 0100 |
|
299 | isexec = mode & 0100 | |
300 | self.mode = (islink, isexec) |
|
300 | self.mode = (islink, isexec) | |
301 |
|
301 | |||
302 | def __repr__(self): |
|
302 | def __repr__(self): | |
303 | return "<patchmeta %s %r>" % (self.op, self.path) |
|
303 | return "<patchmeta %s %r>" % (self.op, self.path) | |
304 |
|
304 | |||
305 | def readgitpatch(lr): |
|
305 | def readgitpatch(lr): | |
306 | """extract git-style metadata about patches from <patchname>""" |
|
306 | """extract git-style metadata about patches from <patchname>""" | |
307 |
|
307 | |||
308 | # Filter patch for git information |
|
308 | # Filter patch for git information | |
309 | gp = None |
|
309 | gp = None | |
310 | gitpatches = [] |
|
310 | gitpatches = [] | |
311 | for line in lr: |
|
311 | for line in lr: | |
312 | line = line.rstrip(' \r\n') |
|
312 | line = line.rstrip(' \r\n') | |
313 | if line.startswith('diff --git'): |
|
313 | if line.startswith('diff --git'): | |
314 | m = gitre.match(line) |
|
314 | m = gitre.match(line) | |
315 | if m: |
|
315 | if m: | |
316 | if gp: |
|
316 | if gp: | |
317 | gitpatches.append(gp) |
|
317 | gitpatches.append(gp) | |
318 | dst = m.group(2) |
|
318 | dst = m.group(2) | |
319 | gp = patchmeta(dst) |
|
319 | gp = patchmeta(dst) | |
320 | elif gp: |
|
320 | elif gp: | |
321 | if line.startswith('--- '): |
|
321 | if line.startswith('--- '): | |
322 | gitpatches.append(gp) |
|
322 | gitpatches.append(gp) | |
323 | gp = None |
|
323 | gp = None | |
324 | continue |
|
324 | continue | |
325 | if line.startswith('rename from '): |
|
325 | if line.startswith('rename from '): | |
326 | gp.op = 'RENAME' |
|
326 | gp.op = 'RENAME' | |
327 | gp.oldpath = line[12:] |
|
327 | gp.oldpath = line[12:] | |
328 | elif line.startswith('rename to '): |
|
328 | elif line.startswith('rename to '): | |
329 | gp.path = line[10:] |
|
329 | gp.path = line[10:] | |
330 | elif line.startswith('copy from '): |
|
330 | elif line.startswith('copy from '): | |
331 | gp.op = 'COPY' |
|
331 | gp.op = 'COPY' | |
332 | gp.oldpath = line[10:] |
|
332 | gp.oldpath = line[10:] | |
333 | elif line.startswith('copy to '): |
|
333 | elif line.startswith('copy to '): | |
334 | gp.path = line[8:] |
|
334 | gp.path = line[8:] | |
335 | elif line.startswith('deleted file'): |
|
335 | elif line.startswith('deleted file'): | |
336 | gp.op = 'DELETE' |
|
336 | gp.op = 'DELETE' | |
337 | elif line.startswith('new file mode '): |
|
337 | elif line.startswith('new file mode '): | |
338 | gp.op = 'ADD' |
|
338 | gp.op = 'ADD' | |
339 | gp.setmode(int(line[-6:], 8)) |
|
339 | gp.setmode(int(line[-6:], 8)) | |
340 | elif line.startswith('new mode '): |
|
340 | elif line.startswith('new mode '): | |
341 | gp.setmode(int(line[-6:], 8)) |
|
341 | gp.setmode(int(line[-6:], 8)) | |
342 | elif line.startswith('GIT binary patch'): |
|
342 | elif line.startswith('GIT binary patch'): | |
343 | gp.binary = True |
|
343 | gp.binary = True | |
344 | if gp: |
|
344 | if gp: | |
345 | gitpatches.append(gp) |
|
345 | gitpatches.append(gp) | |
346 |
|
346 | |||
347 | return gitpatches |
|
347 | return gitpatches | |
348 |
|
348 | |||
349 | class linereader(object): |
|
349 | class linereader(object): | |
350 | # simple class to allow pushing lines back into the input stream |
|
350 | # simple class to allow pushing lines back into the input stream | |
351 | def __init__(self, fp, textmode=False): |
|
351 | def __init__(self, fp, textmode=False): | |
352 | self.fp = fp |
|
352 | self.fp = fp | |
353 | self.buf = [] |
|
353 | self.buf = [] | |
354 | self.textmode = textmode |
|
354 | self.textmode = textmode | |
355 | self.eol = None |
|
355 | self.eol = None | |
356 |
|
356 | |||
357 | def push(self, line): |
|
357 | def push(self, line): | |
358 | if line is not None: |
|
358 | if line is not None: | |
359 | self.buf.append(line) |
|
359 | self.buf.append(line) | |
360 |
|
360 | |||
361 | def readline(self): |
|
361 | def readline(self): | |
362 | if self.buf: |
|
362 | if self.buf: | |
363 | l = self.buf[0] |
|
363 | l = self.buf[0] | |
364 | del self.buf[0] |
|
364 | del self.buf[0] | |
365 | return l |
|
365 | return l | |
366 | l = self.fp.readline() |
|
366 | l = self.fp.readline() | |
367 | if not self.eol: |
|
367 | if not self.eol: | |
368 | if l.endswith('\r\n'): |
|
368 | if l.endswith('\r\n'): | |
369 | self.eol = '\r\n' |
|
369 | self.eol = '\r\n' | |
370 | elif l.endswith('\n'): |
|
370 | elif l.endswith('\n'): | |
371 | self.eol = '\n' |
|
371 | self.eol = '\n' | |
372 | if self.textmode and l.endswith('\r\n'): |
|
372 | if self.textmode and l.endswith('\r\n'): | |
373 | l = l[:-2] + '\n' |
|
373 | l = l[:-2] + '\n' | |
374 | return l |
|
374 | return l | |
375 |
|
375 | |||
376 | def __iter__(self): |
|
376 | def __iter__(self): | |
377 | while 1: |
|
377 | while 1: | |
378 | l = self.readline() |
|
378 | l = self.readline() | |
379 | if not l: |
|
379 | if not l: | |
380 | break |
|
380 | break | |
381 | yield l |
|
381 | yield l | |
382 |
|
382 | |||
383 | # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 |
|
383 | # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 | |
384 | unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@') |
|
384 | unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@') | |
385 | contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)') |
|
385 | contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)') | |
386 | eolmodes = ['strict', 'crlf', 'lf', 'auto'] |
|
386 | eolmodes = ['strict', 'crlf', 'lf', 'auto'] | |
387 |
|
387 | |||
388 | class patchfile(object): |
|
388 | class patchfile(object): | |
389 | def __init__(self, ui, fname, opener, missing=False, eolmode='strict'): |
|
389 | def __init__(self, ui, fname, opener, missing=False, eolmode='strict'): | |
390 | self.fname = fname |
|
390 | self.fname = fname | |
391 | self.eolmode = eolmode |
|
391 | self.eolmode = eolmode | |
392 | self.eol = None |
|
392 | self.eol = None | |
393 | self.opener = opener |
|
393 | self.opener = opener | |
394 | self.ui = ui |
|
394 | self.ui = ui | |
395 | self.lines = [] |
|
395 | self.lines = [] | |
396 | self.exists = False |
|
396 | self.exists = False | |
397 | self.missing = missing |
|
397 | self.missing = missing | |
398 | if not missing: |
|
398 | if not missing: | |
399 | try: |
|
399 | try: | |
400 | self.lines = self.readlines(fname) |
|
400 | self.lines = self.readlines(fname) | |
401 | self.exists = True |
|
401 | self.exists = True | |
402 | except IOError: |
|
402 | except IOError: | |
403 | pass |
|
403 | pass | |
404 | else: |
|
404 | else: | |
405 | self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) |
|
405 | self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) | |
406 |
|
406 | |||
407 | self.hash = {} |
|
407 | self.hash = {} | |
408 | self.dirty = 0 |
|
408 | self.dirty = 0 | |
409 | self.offset = 0 |
|
409 | self.offset = 0 | |
410 | self.skew = 0 |
|
410 | self.skew = 0 | |
411 | self.rej = [] |
|
411 | self.rej = [] | |
412 | self.fileprinted = False |
|
412 | self.fileprinted = False | |
413 | self.printfile(False) |
|
413 | self.printfile(False) | |
414 | self.hunks = 0 |
|
414 | self.hunks = 0 | |
415 |
|
415 | |||
416 | def readlines(self, fname): |
|
416 | def readlines(self, fname): | |
417 | if os.path.islink(fname): |
|
417 | if os.path.islink(fname): | |
418 | return [os.readlink(fname)] |
|
418 | return [os.readlink(fname)] | |
419 | fp = self.opener(fname, 'r') |
|
419 | fp = self.opener(fname, 'r') | |
420 | try: |
|
420 | try: | |
421 | lr = linereader(fp, self.eolmode != 'strict') |
|
421 | lr = linereader(fp, self.eolmode != 'strict') | |
422 | lines = list(lr) |
|
422 | lines = list(lr) | |
423 | self.eol = lr.eol |
|
423 | self.eol = lr.eol | |
424 | return lines |
|
424 | return lines | |
425 | finally: |
|
425 | finally: | |
426 | fp.close() |
|
426 | fp.close() | |
427 |
|
427 | |||
428 | def writelines(self, fname, lines): |
|
428 | def writelines(self, fname, lines): | |
429 | # Ensure supplied data ends in fname, being a regular file or |
|
429 | # Ensure supplied data ends in fname, being a regular file or | |
430 | # a symlink. cmdutil.updatedir will -too magically- take care |
|
430 | # a symlink. cmdutil.updatedir will -too magically- take care | |
431 | # of setting it to the proper type afterwards. |
|
431 | # of setting it to the proper type afterwards. | |
432 | islink = os.path.islink(fname) |
|
432 | islink = os.path.islink(fname) | |
433 | if islink: |
|
433 | if islink: | |
434 | fp = cStringIO.StringIO() |
|
434 | fp = cStringIO.StringIO() | |
435 | else: |
|
435 | else: | |
436 | fp = self.opener(fname, 'w') |
|
436 | fp = self.opener(fname, 'w') | |
437 | try: |
|
437 | try: | |
438 | if self.eolmode == 'auto': |
|
438 | if self.eolmode == 'auto': | |
439 | eol = self.eol |
|
439 | eol = self.eol | |
440 | elif self.eolmode == 'crlf': |
|
440 | elif self.eolmode == 'crlf': | |
441 | eol = '\r\n' |
|
441 | eol = '\r\n' | |
442 | else: |
|
442 | else: | |
443 | eol = '\n' |
|
443 | eol = '\n' | |
444 |
|
444 | |||
445 | if self.eolmode != 'strict' and eol and eol != '\n': |
|
445 | if self.eolmode != 'strict' and eol and eol != '\n': | |
446 | for l in lines: |
|
446 | for l in lines: | |
447 | if l and l[-1] == '\n': |
|
447 | if l and l[-1] == '\n': | |
448 | l = l[:-1] + eol |
|
448 | l = l[:-1] + eol | |
449 | fp.write(l) |
|
449 | fp.write(l) | |
450 | else: |
|
450 | else: | |
451 | fp.writelines(lines) |
|
451 | fp.writelines(lines) | |
452 | if islink: |
|
452 | if islink: | |
453 | self.opener.symlink(fp.getvalue(), fname) |
|
453 | self.opener.symlink(fp.getvalue(), fname) | |
454 | finally: |
|
454 | finally: | |
455 | fp.close() |
|
455 | fp.close() | |
456 |
|
456 | |||
457 | def unlink(self, fname): |
|
457 | def unlink(self, fname): | |
458 | os.unlink(fname) |
|
458 | os.unlink(fname) | |
459 |
|
459 | |||
460 | def printfile(self, warn): |
|
460 | def printfile(self, warn): | |
461 | if self.fileprinted: |
|
461 | if self.fileprinted: | |
462 | return |
|
462 | return | |
463 | if warn or self.ui.verbose: |
|
463 | if warn or self.ui.verbose: | |
464 | self.fileprinted = True |
|
464 | self.fileprinted = True | |
465 | s = _("patching file %s\n") % self.fname |
|
465 | s = _("patching file %s\n") % self.fname | |
466 | if warn: |
|
466 | if warn: | |
467 | self.ui.warn(s) |
|
467 | self.ui.warn(s) | |
468 | else: |
|
468 | else: | |
469 | self.ui.note(s) |
|
469 | self.ui.note(s) | |
470 |
|
470 | |||
471 |
|
471 | |||
472 | def findlines(self, l, linenum): |
|
472 | def findlines(self, l, linenum): | |
473 | # looks through the hash and finds candidate lines. The |
|
473 | # looks through the hash and finds candidate lines. The | |
474 | # result is a list of line numbers sorted based on distance |
|
474 | # result is a list of line numbers sorted based on distance | |
475 | # from linenum |
|
475 | # from linenum | |
476 |
|
476 | |||
477 | cand = self.hash.get(l, []) |
|
477 | cand = self.hash.get(l, []) | |
478 | if len(cand) > 1: |
|
478 | if len(cand) > 1: | |
479 | # resort our list of potentials forward then back. |
|
479 | # resort our list of potentials forward then back. | |
480 | cand.sort(key=lambda x: abs(x - linenum)) |
|
480 | cand.sort(key=lambda x: abs(x - linenum)) | |
481 | return cand |
|
481 | return cand | |
482 |
|
482 | |||
483 | def hashlines(self): |
|
483 | def hashlines(self): | |
484 | self.hash = {} |
|
484 | self.hash = {} | |
485 | for x, s in enumerate(self.lines): |
|
485 | for x, s in enumerate(self.lines): | |
486 | self.hash.setdefault(s, []).append(x) |
|
486 | self.hash.setdefault(s, []).append(x) | |
487 |
|
487 | |||
|
488 | def makerejlines(self, fname): | |||
|
489 | base = os.path.basename(fname) | |||
|
490 | yield "--- %s\n+++ %s\n" % (base, base) | |||
|
491 | for x in self.rej: | |||
|
492 | for l in x.hunk: | |||
|
493 | yield l | |||
|
494 | if l[-1] != '\n': | |||
|
495 | yield "\n\ No newline at end of file\n" | |||
|
496 | ||||
488 | def write_rej(self): |
|
497 | def write_rej(self): | |
489 | # our rejects are a little different from patch(1). This always |
|
498 | # our rejects are a little different from patch(1). This always | |
490 | # creates rejects in the same form as the original patch. A file |
|
499 | # creates rejects in the same form as the original patch. A file | |
491 | # header is inserted so that you can run the reject through patch again |
|
500 | # header is inserted so that you can run the reject through patch again | |
492 | # without having to type the filename. |
|
501 | # without having to type the filename. | |
493 |
|
502 | |||
494 | if not self.rej: |
|
503 | if not self.rej: | |
495 | return |
|
504 | return | |
496 |
|
505 | |||
497 | fname = self.fname + ".rej" |
|
506 | fname = self.fname + ".rej" | |
498 | self.ui.warn( |
|
507 | self.ui.warn( | |
499 | _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % |
|
508 | _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % | |
500 | (len(self.rej), self.hunks, fname)) |
|
509 | (len(self.rej), self.hunks, fname)) | |
501 |
|
510 | |||
502 | def rejlines(): |
|
511 | fp = self.opener(fname, 'w') | |
503 | base = os.path.basename(self.fname) |
|
512 | fp.writelines(self.makerejlines(self.fname)) | |
504 | yield "--- %s\n+++ %s\n" % (base, base) |
|
513 | fp.close() | |
505 | for x in self.rej: |
|
|||
506 | for l in x.hunk: |
|
|||
507 | yield l |
|
|||
508 | if l[-1] != '\n': |
|
|||
509 | yield "\n\ No newline at end of file\n" |
|
|||
510 |
|
||||
511 | self.writelines(fname, rejlines()) |
|
|||
512 |
|
514 | |||
    def apply(self, h):
        """Apply one hunk h to the in-memory line buffer.

        Returns 0 on a clean apply, the fuzz amount used when the hunk
        only applied after fuzzing (may also be 0), and -1 when the
        hunk was rejected (appended to self.rej).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                            (h.number, h.desc, len(h.a), h.lena, len(h.b),
                            h.lenb))

        self.hunks += 1

        # target file could not be found: reject without trying
        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # binary hunks replace (or remove) the file content wholesale
        if isinstance(h, binhunk):
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = 1
            return 0

        # keep the unnormalized hunk around: rejects must reproduce the
        # original patch text
        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = 1
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hashlines()
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        # try increasing fuzz; at each level first strip context only
        # from the top of the hunk, then from both ends
        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        # remember displacement to seed the next hunk's search
                        self.skew = l - orig_start
                        self.dirty = 1
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1
605 |
|
607 | |||
class hunk(object):
    """One hunk of a text patch, parsed from unified or context format.

    self.hunk keeps the raw hunk lines starting with the @@ descriptor;
    self.a collects old-side lines (with their diff markers, see
    read_context_hunk) and self.b new-side line text.  For unified
    hunks both are filled by diffhelpers.addlines.  starta/lena and
    startb/lenb mirror the @@ -start,len +start,len @@ header.
    """
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        # desc: the hunk descriptor line; num: 1-based hunk number
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        # lr=None builds an empty shell (see getnormalized)
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # strip a CR before the final LF on each line, if present
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None, False, False)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        nh.create = self.create
        nh.remove = self.remove
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from line reader lr.

        The @@ descriptor is already in self.desc; line counts default
        to 1 when omitted (e.g. '@@ -3 +4 @@').
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and convert it to unified form.

        Reads the old-side block, then the new-side block, merging the
        two into self.hunk with unified-style '-'/'+'/' ' markers, and
        finally rewrites self.desc as an @@ header.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        # ranges are inclusive unless the block starts at line 0
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # no-newline-at-eof marker: drop the trailing newline we kept
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk while we interleave the new-side lines
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # find where this new-side line slots into the merged hunk:
            # skip matching context, pass over '-' lines, insert otherwise
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc

    def fix_newline(self):
        # delegate no-eol fixups to the C/py diffhelpers implementation
        diffhelpers.fix_newline(self.hunk, self.a, self.b)

    def complete(self):
        # parsed line counts must match the header's advertised lengths
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        # an empty old side starting at 0 plus the create flag = new file
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        # an empty new side starting at 0 plus the remove flag = deletion
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        """Old-side lines, optionally shortened by fuzz context lines."""
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        """New-side lines, optionally shortened by fuzz context lines."""
        return self.fuzzit(self.b, fuzz, toponly)
830 |
|
832 | |||
class binhunk:
    'A binary patch file. Only understands literals so far.'
    def __init__(self, gitpatch):
        # gitpatch carries the git metadata (op, paths) for this file
        self.gitpatch = gitpatch
        self.text = None
        self.hunk = ['GIT binary patch\n']

    def createfile(self):
        # a binary hunk creates its target for add/rename/copy operations
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        # extraction succeeded once the literal payload has been decoded
        return self.text is not None

    def new(self):
        return [self.text]

    def extract(self, lr):
        """Read a 'literal N' git binary hunk from line reader lr.

        Decodes the base85 payload, inflates it with zlib and verifies
        the result against the advertised size.  Raises PatchError on
        malformed or truncated data.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first char encodes the decoded length of this line:
            # 'A'-'Z' -> 1-26, 'a'-'z' -> 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # fix: arguments were previously passed as '% len(text), size',
            # which raised TypeError (missing format arg) instead of this
            # intended PatchError
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
876 |
|
878 | |||
def parsefilename(str):
    """Extract the filename from a '--- ' or '+++ ' diff header line.

    The four leading characters are skipped, trailing CR/LF stripped,
    and the name runs up to the first tab or, failing that, the first
    space; with neither separator present the whole rest is the name.
    """
    rest = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = rest.find(sep)
        if cut >= 0:
            return rest[:cut]
    return rest
886 |
|
888 | |||
def pathstrip(path, strip):
    """Split path into (stripped-prefix, remainder), patch(1) -p style.

    Removes strip leading directory components; runs of consecutive
    slashes count as a single separator.  strip=0 returns the whole
    (right-stripped) path with an empty prefix.  Raises PatchError when
    path has fewer components than requested.
    """
    if strip == 0:
        return '', path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow any extra '/' so 'a//b' strips as one component
        while pos < end - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
904 |
|
906 | |||
def selectfile(afile_orig, bfile_orig, hunk, strip):
    """Pick the on-disk file a hunk should be applied to.

    afile_orig/bfile_orig are the '---'/'+++' paths from the patch and
    strip the patch(1) -p component count.  Returns (fname, missing)
    where fname is the chosen target path and missing is True when
    neither side exists and the hunk does not create the file.  May
    flip hunk.create as a side effect (see below).  Raises PatchError
    when both sides are /dev/null.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    # lexists: a dangling symlink still counts as present
    gooda = not nulla and os.path.lexists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and os.path.lexists(bfile)
    createfunc = hunk.createfile
    missing = not goodb and not gooda and not createfunc()

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if missing and abasedir == bbasedir and afile.startswith(bfile):
        # this isn't very pretty
        # tentatively treat the hunk as a file creation; keep the flag
        # only if the hunk shape actually supports it
        hunk.create = True
        if createfunc():
            missing = False
        else:
            hunk.create = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing
950 |
|
952 | |||
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy and rename operations.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as-is: the renamed 'a' would no
    longer exist, and copying from 'b' instead would pick up changes.
    So we scan the whole patch for copy/rename commands up front, which
    lets the caller perform the copies ahead of time.
    """
    offset = 0
    source = lr.fp
    try:
        offset = source.tell()
    except IOError:
        # unseekable input: slurp everything into a seekable buffer
        source = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(source, lr.textmode)
    gitlr.push(firstline)
    patches = readgitpatch(gitlr)
    # rewind so the regular parsing pass sees the patch from the start
    source.seek(offset)
    return patches
976 |
|
978 | |||
977 | def iterhunks(ui, fp): |
|
979 | def iterhunks(ui, fp): | |
978 | """Read a patch and yield the following events: |
|
980 | """Read a patch and yield the following events: | |
979 | - ("file", afile, bfile, firsthunk): select a new target file. |
|
981 | - ("file", afile, bfile, firsthunk): select a new target file. | |
980 | - ("hunk", hunk): a new hunk is ready to be applied, follows a |
|
982 | - ("hunk", hunk): a new hunk is ready to be applied, follows a | |
981 | "file" event. |
|
983 | "file" event. | |
982 | - ("git", gitchanges): current diff is in git format, gitchanges |
|
984 | - ("git", gitchanges): current diff is in git format, gitchanges | |
983 | maps filenames to gitpatch records. Unique event. |
|
985 | maps filenames to gitpatch records. Unique event. | |
984 | """ |
|
986 | """ | |
985 | changed = {} |
|
987 | changed = {} | |
986 | current_hunk = None |
|
988 | current_hunk = None | |
987 | afile = "" |
|
989 | afile = "" | |
988 | bfile = "" |
|
990 | bfile = "" | |
989 | state = None |
|
991 | state = None | |
990 | hunknum = 0 |
|
992 | hunknum = 0 | |
991 | emitfile = False |
|
993 | emitfile = False | |
992 | git = False |
|
994 | git = False | |
993 |
|
995 | |||
994 | # our states |
|
996 | # our states | |
995 | BFILE = 1 |
|
997 | BFILE = 1 | |
996 | context = None |
|
998 | context = None | |
997 | lr = linereader(fp) |
|
999 | lr = linereader(fp) | |
998 |
|
1000 | |||
999 | while True: |
|
1001 | while True: | |
1000 | newfile = newgitfile = False |
|
1002 | newfile = newgitfile = False | |
1001 | x = lr.readline() |
|
1003 | x = lr.readline() | |
1002 | if not x: |
|
1004 | if not x: | |
1003 | break |
|
1005 | break | |
1004 | if current_hunk: |
|
1006 | if current_hunk: | |
1005 | if x.startswith('\ '): |
|
1007 | if x.startswith('\ '): | |
1006 | current_hunk.fix_newline() |
|
1008 | current_hunk.fix_newline() | |
1007 | yield 'hunk', current_hunk |
|
1009 | yield 'hunk', current_hunk | |
1008 | current_hunk = None |
|
1010 | current_hunk = None | |
1009 | if (state == BFILE and ((not context and x[0] == '@') or |
|
1011 | if (state == BFILE and ((not context and x[0] == '@') or | |
1010 | ((context is not False) and x.startswith('***************')))): |
|
1012 | ((context is not False) and x.startswith('***************')))): | |
1011 | if context is None and x.startswith('***************'): |
|
1013 | if context is None and x.startswith('***************'): | |
1012 | context = True |
|
1014 | context = True | |
1013 | gpatch = changed.get(bfile) |
|
1015 | gpatch = changed.get(bfile) | |
1014 | create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' |
|
1016 | create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' | |
1015 | remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' |
|
1017 | remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' | |
1016 | current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) |
|
1018 | current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) | |
1017 | hunknum += 1 |
|
1019 | hunknum += 1 | |
1018 | if emitfile: |
|
1020 | if emitfile: | |
1019 | emitfile = False |
|
1021 | emitfile = False | |
1020 | yield 'file', (afile, bfile, current_hunk) |
|
1022 | yield 'file', (afile, bfile, current_hunk) | |
1021 | elif state == BFILE and x.startswith('GIT binary patch'): |
|
1023 | elif state == BFILE and x.startswith('GIT binary patch'): | |
1022 | current_hunk = binhunk(changed[bfile]) |
|
1024 | current_hunk = binhunk(changed[bfile]) | |
1023 | hunknum += 1 |
|
1025 | hunknum += 1 | |
1024 | if emitfile: |
|
1026 | if emitfile: | |
1025 | emitfile = False |
|
1027 | emitfile = False | |
1026 | yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) |
|
1028 | yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) | |
1027 | current_hunk.extract(lr) |
|
1029 | current_hunk.extract(lr) | |
1028 | elif x.startswith('diff --git'): |
|
1030 | elif x.startswith('diff --git'): | |
1029 | # check for git diff, scanning the whole patch file if needed |
|
1031 | # check for git diff, scanning the whole patch file if needed | |
1030 | m = gitre.match(x) |
|
1032 | m = gitre.match(x) | |
1031 | if m: |
|
1033 | if m: | |
1032 | afile, bfile = m.group(1, 2) |
|
1034 | afile, bfile = m.group(1, 2) | |
1033 | if not git: |
|
1035 | if not git: | |
1034 | git = True |
|
1036 | git = True | |
1035 | gitpatches = scangitpatch(lr, x) |
|
1037 | gitpatches = scangitpatch(lr, x) | |
1036 | yield 'git', gitpatches |
|
1038 | yield 'git', gitpatches | |
1037 | for gp in gitpatches: |
|
1039 | for gp in gitpatches: | |
1038 | changed[gp.path] = gp |
|
1040 | changed[gp.path] = gp | |
1039 | # else error? |
|
1041 | # else error? | |
1040 | # copy/rename + modify should modify target, not source |
|
1042 | # copy/rename + modify should modify target, not source | |
1041 | gp = changed.get(bfile) |
|
1043 | gp = changed.get(bfile) | |
1042 | if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') |
|
1044 | if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') | |
1043 | or gp.mode): |
|
1045 | or gp.mode): | |
1044 | afile = bfile |
|
1046 | afile = bfile | |
1045 | newgitfile = True |
|
1047 | newgitfile = True | |
1046 | elif x.startswith('---'): |
|
1048 | elif x.startswith('---'): | |
1047 | # check for a unified diff |
|
1049 | # check for a unified diff | |
1048 | l2 = lr.readline() |
|
1050 | l2 = lr.readline() | |
1049 | if not l2.startswith('+++'): |
|
1051 | if not l2.startswith('+++'): | |
1050 | lr.push(l2) |
|
1052 | lr.push(l2) | |
1051 | continue |
|
1053 | continue | |
1052 | newfile = True |
|
1054 | newfile = True | |
1053 | context = False |
|
1055 | context = False | |
1054 | afile = parsefilename(x) |
|
1056 | afile = parsefilename(x) | |
1055 | bfile = parsefilename(l2) |
|
1057 | bfile = parsefilename(l2) | |
1056 | elif x.startswith('***'): |
|
1058 | elif x.startswith('***'): | |
1057 | # check for a context diff |
|
1059 | # check for a context diff | |
1058 | l2 = lr.readline() |
|
1060 | l2 = lr.readline() | |
1059 | if not l2.startswith('---'): |
|
1061 | if not l2.startswith('---'): | |
1060 | lr.push(l2) |
|
1062 | lr.push(l2) | |
1061 | continue |
|
1063 | continue | |
1062 | l3 = lr.readline() |
|
1064 | l3 = lr.readline() | |
1063 | lr.push(l3) |
|
1065 | lr.push(l3) | |
1064 | if not l3.startswith("***************"): |
|
1066 | if not l3.startswith("***************"): | |
1065 | lr.push(l2) |
|
1067 | lr.push(l2) | |
1066 | continue |
|
1068 | continue | |
1067 | newfile = True |
|
1069 | newfile = True | |
1068 | context = True |
|
1070 | context = True | |
1069 | afile = parsefilename(x) |
|
1071 | afile = parsefilename(x) | |
1070 | bfile = parsefilename(l2) |
|
1072 | bfile = parsefilename(l2) | |
1071 |
|
1073 | |||
1072 | if newgitfile or newfile: |
|
1074 | if newgitfile or newfile: | |
1073 | emitfile = True |
|
1075 | emitfile = True | |
1074 | state = BFILE |
|
1076 | state = BFILE | |
1075 | hunknum = 0 |
|
1077 | hunknum = 0 | |
1076 | if current_hunk: |
|
1078 | if current_hunk: | |
1077 | if current_hunk.complete(): |
|
1079 | if current_hunk.complete(): | |
1078 | yield 'hunk', current_hunk |
|
1080 | yield 'hunk', current_hunk | |
1079 | else: |
|
1081 | else: | |
1080 | raise PatchError(_("malformed patch %s %s") % (afile, |
|
1082 | raise PatchError(_("malformed patch %s %s") % (afile, | |
1081 | current_hunk.desc)) |
|
1083 | current_hunk.desc)) | |
1082 |
|
1084 | |||
1083 | def applydiff(ui, fp, changed, strip=1, eolmode='strict'): |
|
1085 | def applydiff(ui, fp, changed, strip=1, eolmode='strict'): | |
1084 | """Reads a patch from fp and tries to apply it. |
|
1086 | """Reads a patch from fp and tries to apply it. | |
1085 |
|
1087 | |||
1086 | The dict 'changed' is filled in with all of the filenames changed |
|
1088 | The dict 'changed' is filled in with all of the filenames changed | |
1087 | by the patch. Returns 0 for a clean patch, -1 if any rejects were |
|
1089 | by the patch. Returns 0 for a clean patch, -1 if any rejects were | |
1088 | found and 1 if there was any fuzz. |
|
1090 | found and 1 if there was any fuzz. | |
1089 |
|
1091 | |||
1090 | If 'eolmode' is 'strict', the patch content and patched file are |
|
1092 | If 'eolmode' is 'strict', the patch content and patched file are | |
1091 | read in binary mode. Otherwise, line endings are ignored when |
|
1093 | read in binary mode. Otherwise, line endings are ignored when | |
1092 | patching then normalized according to 'eolmode'. |
|
1094 | patching then normalized according to 'eolmode'. | |
1093 |
|
1095 | |||
1094 | Callers probably want to call 'cmdutil.updatedir' after this to |
|
1096 | Callers probably want to call 'cmdutil.updatedir' after this to | |
1095 | apply certain categories of changes not done by this function. |
|
1097 | apply certain categories of changes not done by this function. | |
1096 | """ |
|
1098 | """ | |
1097 | return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip, |
|
1099 | return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip, | |
1098 | eolmode=eolmode) |
|
1100 | eolmode=eolmode) | |
1099 |
|
1101 | |||
1100 | def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'): |
|
1102 | def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'): | |
1101 | rejects = 0 |
|
1103 | rejects = 0 | |
1102 | err = 0 |
|
1104 | err = 0 | |
1103 | current_file = None |
|
1105 | current_file = None | |
1104 | cwd = os.getcwd() |
|
1106 | cwd = os.getcwd() | |
1105 | opener = util.opener(cwd) |
|
1107 | opener = util.opener(cwd) | |
1106 |
|
1108 | |||
1107 | def closefile(): |
|
1109 | def closefile(): | |
1108 | if not current_file: |
|
1110 | if not current_file: | |
1109 | return 0 |
|
1111 | return 0 | |
1110 | if current_file.dirty: |
|
1112 | if current_file.dirty: | |
1111 | current_file.writelines(current_file.fname, current_file.lines) |
|
1113 | current_file.writelines(current_file.fname, current_file.lines) | |
1112 | current_file.write_rej() |
|
1114 | current_file.write_rej() | |
1113 | return len(current_file.rej) |
|
1115 | return len(current_file.rej) | |
1114 |
|
1116 | |||
1115 | for state, values in iterhunks(ui, fp): |
|
1117 | for state, values in iterhunks(ui, fp): | |
1116 | if state == 'hunk': |
|
1118 | if state == 'hunk': | |
1117 | if not current_file: |
|
1119 | if not current_file: | |
1118 | continue |
|
1120 | continue | |
1119 | ret = current_file.apply(values) |
|
1121 | ret = current_file.apply(values) | |
1120 | if ret >= 0: |
|
1122 | if ret >= 0: | |
1121 | changed.setdefault(current_file.fname, None) |
|
1123 | changed.setdefault(current_file.fname, None) | |
1122 | if ret > 0: |
|
1124 | if ret > 0: | |
1123 | err = 1 |
|
1125 | err = 1 | |
1124 | elif state == 'file': |
|
1126 | elif state == 'file': | |
1125 | rejects += closefile() |
|
1127 | rejects += closefile() | |
1126 | afile, bfile, first_hunk = values |
|
1128 | afile, bfile, first_hunk = values | |
1127 | try: |
|
1129 | try: | |
1128 | current_file, missing = selectfile(afile, bfile, |
|
1130 | current_file, missing = selectfile(afile, bfile, | |
1129 | first_hunk, strip) |
|
1131 | first_hunk, strip) | |
1130 | current_file = patcher(ui, current_file, opener, |
|
1132 | current_file = patcher(ui, current_file, opener, | |
1131 | missing=missing, eolmode=eolmode) |
|
1133 | missing=missing, eolmode=eolmode) | |
1132 | except PatchError, err: |
|
1134 | except PatchError, err: | |
1133 | ui.warn(str(err) + '\n') |
|
1135 | ui.warn(str(err) + '\n') | |
1134 | current_file = None |
|
1136 | current_file = None | |
1135 | rejects += 1 |
|
1137 | rejects += 1 | |
1136 | continue |
|
1138 | continue | |
1137 | elif state == 'git': |
|
1139 | elif state == 'git': | |
1138 | for gp in values: |
|
1140 | for gp in values: | |
1139 | gp.path = pathstrip(gp.path, strip - 1)[1] |
|
1141 | gp.path = pathstrip(gp.path, strip - 1)[1] | |
1140 | if gp.oldpath: |
|
1142 | if gp.oldpath: | |
1141 | gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1] |
|
1143 | gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1] | |
1142 | # Binary patches really overwrite target files, copying them |
|
1144 | # Binary patches really overwrite target files, copying them | |
1143 | # will just make it fails with "target file exists" |
|
1145 | # will just make it fails with "target file exists" | |
1144 | if gp.op in ('COPY', 'RENAME') and not gp.binary: |
|
1146 | if gp.op in ('COPY', 'RENAME') and not gp.binary: | |
1145 | copyfn(gp.oldpath, gp.path, cwd) |
|
1147 | copyfn(gp.oldpath, gp.path, cwd) | |
1146 | changed[gp.path] = gp |
|
1148 | changed[gp.path] = gp | |
1147 | else: |
|
1149 | else: | |
1148 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1150 | raise util.Abort(_('unsupported parser state: %s') % state) | |
1149 |
|
1151 | |||
1150 | rejects += closefile() |
|
1152 | rejects += closefile() | |
1151 |
|
1153 | |||
1152 | if rejects: |
|
1154 | if rejects: | |
1153 | return -1 |
|
1155 | return -1 | |
1154 | return err |
|
1156 | return err | |
1155 |
|
1157 | |||
1156 | def externalpatch(patcher, patchname, ui, strip, cwd, files): |
|
1158 | def externalpatch(patcher, patchname, ui, strip, cwd, files): | |
1157 | """use <patcher> to apply <patchname> to the working directory. |
|
1159 | """use <patcher> to apply <patchname> to the working directory. | |
1158 | returns whether patch was applied with fuzz factor.""" |
|
1160 | returns whether patch was applied with fuzz factor.""" | |
1159 |
|
1161 | |||
1160 | fuzz = False |
|
1162 | fuzz = False | |
1161 | args = [] |
|
1163 | args = [] | |
1162 | if cwd: |
|
1164 | if cwd: | |
1163 | args.append('-d %s' % util.shellquote(cwd)) |
|
1165 | args.append('-d %s' % util.shellquote(cwd)) | |
1164 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, |
|
1166 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, | |
1165 | util.shellquote(patchname))) |
|
1167 | util.shellquote(patchname))) | |
1166 |
|
1168 | |||
1167 | for line in fp: |
|
1169 | for line in fp: | |
1168 | line = line.rstrip() |
|
1170 | line = line.rstrip() | |
1169 | ui.note(line + '\n') |
|
1171 | ui.note(line + '\n') | |
1170 | if line.startswith('patching file '): |
|
1172 | if line.startswith('patching file '): | |
1171 | pf = util.parse_patch_output(line) |
|
1173 | pf = util.parse_patch_output(line) | |
1172 | printed_file = False |
|
1174 | printed_file = False | |
1173 | files.setdefault(pf, None) |
|
1175 | files.setdefault(pf, None) | |
1174 | elif line.find('with fuzz') >= 0: |
|
1176 | elif line.find('with fuzz') >= 0: | |
1175 | fuzz = True |
|
1177 | fuzz = True | |
1176 | if not printed_file: |
|
1178 | if not printed_file: | |
1177 | ui.warn(pf + '\n') |
|
1179 | ui.warn(pf + '\n') | |
1178 | printed_file = True |
|
1180 | printed_file = True | |
1179 | ui.warn(line + '\n') |
|
1181 | ui.warn(line + '\n') | |
1180 | elif line.find('saving rejects to file') >= 0: |
|
1182 | elif line.find('saving rejects to file') >= 0: | |
1181 | ui.warn(line + '\n') |
|
1183 | ui.warn(line + '\n') | |
1182 | elif line.find('FAILED') >= 0: |
|
1184 | elif line.find('FAILED') >= 0: | |
1183 | if not printed_file: |
|
1185 | if not printed_file: | |
1184 | ui.warn(pf + '\n') |
|
1186 | ui.warn(pf + '\n') | |
1185 | printed_file = True |
|
1187 | printed_file = True | |
1186 | ui.warn(line + '\n') |
|
1188 | ui.warn(line + '\n') | |
1187 | code = fp.close() |
|
1189 | code = fp.close() | |
1188 | if code: |
|
1190 | if code: | |
1189 | raise PatchError(_("patch command failed: %s") % |
|
1191 | raise PatchError(_("patch command failed: %s") % | |
1190 | util.explain_exit(code)[0]) |
|
1192 | util.explain_exit(code)[0]) | |
1191 | return fuzz |
|
1193 | return fuzz | |
1192 |
|
1194 | |||
1193 | def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'): |
|
1195 | def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'): | |
1194 | """use builtin patch to apply <patchobj> to the working directory. |
|
1196 | """use builtin patch to apply <patchobj> to the working directory. | |
1195 | returns whether patch was applied with fuzz factor.""" |
|
1197 | returns whether patch was applied with fuzz factor.""" | |
1196 |
|
1198 | |||
1197 | if files is None: |
|
1199 | if files is None: | |
1198 | files = {} |
|
1200 | files = {} | |
1199 | if eolmode is None: |
|
1201 | if eolmode is None: | |
1200 | eolmode = ui.config('patch', 'eol', 'strict') |
|
1202 | eolmode = ui.config('patch', 'eol', 'strict') | |
1201 | if eolmode.lower() not in eolmodes: |
|
1203 | if eolmode.lower() not in eolmodes: | |
1202 | raise util.Abort(_('unsupported line endings type: %s') % eolmode) |
|
1204 | raise util.Abort(_('unsupported line endings type: %s') % eolmode) | |
1203 | eolmode = eolmode.lower() |
|
1205 | eolmode = eolmode.lower() | |
1204 |
|
1206 | |||
1205 | try: |
|
1207 | try: | |
1206 | fp = open(patchobj, 'rb') |
|
1208 | fp = open(patchobj, 'rb') | |
1207 | except TypeError: |
|
1209 | except TypeError: | |
1208 | fp = patchobj |
|
1210 | fp = patchobj | |
1209 | if cwd: |
|
1211 | if cwd: | |
1210 | curdir = os.getcwd() |
|
1212 | curdir = os.getcwd() | |
1211 | os.chdir(cwd) |
|
1213 | os.chdir(cwd) | |
1212 | try: |
|
1214 | try: | |
1213 | ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode) |
|
1215 | ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode) | |
1214 | finally: |
|
1216 | finally: | |
1215 | if cwd: |
|
1217 | if cwd: | |
1216 | os.chdir(curdir) |
|
1218 | os.chdir(curdir) | |
1217 | if fp != patchobj: |
|
1219 | if fp != patchobj: | |
1218 | fp.close() |
|
1220 | fp.close() | |
1219 | if ret < 0: |
|
1221 | if ret < 0: | |
1220 | raise PatchError(_('patch failed to apply')) |
|
1222 | raise PatchError(_('patch failed to apply')) | |
1221 | return ret > 0 |
|
1223 | return ret > 0 | |
1222 |
|
1224 | |||
1223 | def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'): |
|
1225 | def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'): | |
1224 | """Apply <patchname> to the working directory. |
|
1226 | """Apply <patchname> to the working directory. | |
1225 |
|
1227 | |||
1226 | 'eolmode' specifies how end of lines should be handled. It can be: |
|
1228 | 'eolmode' specifies how end of lines should be handled. It can be: | |
1227 | - 'strict': inputs are read in binary mode, EOLs are preserved |
|
1229 | - 'strict': inputs are read in binary mode, EOLs are preserved | |
1228 | - 'crlf': EOLs are ignored when patching and reset to CRLF |
|
1230 | - 'crlf': EOLs are ignored when patching and reset to CRLF | |
1229 | - 'lf': EOLs are ignored when patching and reset to LF |
|
1231 | - 'lf': EOLs are ignored when patching and reset to LF | |
1230 | - None: get it from user settings, default to 'strict' |
|
1232 | - None: get it from user settings, default to 'strict' | |
1231 | 'eolmode' is ignored when using an external patcher program. |
|
1233 | 'eolmode' is ignored when using an external patcher program. | |
1232 |
|
1234 | |||
1233 | Returns whether patch was applied with fuzz factor. |
|
1235 | Returns whether patch was applied with fuzz factor. | |
1234 | """ |
|
1236 | """ | |
1235 | patcher = ui.config('ui', 'patch') |
|
1237 | patcher = ui.config('ui', 'patch') | |
1236 | if files is None: |
|
1238 | if files is None: | |
1237 | files = {} |
|
1239 | files = {} | |
1238 | try: |
|
1240 | try: | |
1239 | if patcher: |
|
1241 | if patcher: | |
1240 | return externalpatch(patcher, patchname, ui, strip, cwd, files) |
|
1242 | return externalpatch(patcher, patchname, ui, strip, cwd, files) | |
1241 | return internalpatch(patchname, ui, strip, cwd, files, eolmode) |
|
1243 | return internalpatch(patchname, ui, strip, cwd, files, eolmode) | |
1242 | except PatchError, err: |
|
1244 | except PatchError, err: | |
1243 | raise util.Abort(str(err)) |
|
1245 | raise util.Abort(str(err)) | |
1244 |
|
1246 | |||
1245 | def b85diff(to, tn): |
|
1247 | def b85diff(to, tn): | |
1246 | '''print base85-encoded binary diff''' |
|
1248 | '''print base85-encoded binary diff''' | |
1247 | def gitindex(text): |
|
1249 | def gitindex(text): | |
1248 | if not text: |
|
1250 | if not text: | |
1249 | return hex(nullid) |
|
1251 | return hex(nullid) | |
1250 | l = len(text) |
|
1252 | l = len(text) | |
1251 | s = util.sha1('blob %d\0' % l) |
|
1253 | s = util.sha1('blob %d\0' % l) | |
1252 | s.update(text) |
|
1254 | s.update(text) | |
1253 | return s.hexdigest() |
|
1255 | return s.hexdigest() | |
1254 |
|
1256 | |||
1255 | def fmtline(line): |
|
1257 | def fmtline(line): | |
1256 | l = len(line) |
|
1258 | l = len(line) | |
1257 | if l <= 26: |
|
1259 | if l <= 26: | |
1258 | l = chr(ord('A') + l - 1) |
|
1260 | l = chr(ord('A') + l - 1) | |
1259 | else: |
|
1261 | else: | |
1260 | l = chr(l - 26 + ord('a') - 1) |
|
1262 | l = chr(l - 26 + ord('a') - 1) | |
1261 | return '%c%s\n' % (l, base85.b85encode(line, True)) |
|
1263 | return '%c%s\n' % (l, base85.b85encode(line, True)) | |
1262 |
|
1264 | |||
1263 | def chunk(text, csize=52): |
|
1265 | def chunk(text, csize=52): | |
1264 | l = len(text) |
|
1266 | l = len(text) | |
1265 | i = 0 |
|
1267 | i = 0 | |
1266 | while i < l: |
|
1268 | while i < l: | |
1267 | yield text[i:i + csize] |
|
1269 | yield text[i:i + csize] | |
1268 | i += csize |
|
1270 | i += csize | |
1269 |
|
1271 | |||
1270 | tohash = gitindex(to) |
|
1272 | tohash = gitindex(to) | |
1271 | tnhash = gitindex(tn) |
|
1273 | tnhash = gitindex(tn) | |
1272 | if tohash == tnhash: |
|
1274 | if tohash == tnhash: | |
1273 | return "" |
|
1275 | return "" | |
1274 |
|
1276 | |||
1275 | # TODO: deltas |
|
1277 | # TODO: deltas | |
1276 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % |
|
1278 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % | |
1277 | (tohash, tnhash, len(tn))] |
|
1279 | (tohash, tnhash, len(tn))] | |
1278 | for l in chunk(zlib.compress(tn)): |
|
1280 | for l in chunk(zlib.compress(tn)): | |
1279 | ret.append(fmtline(l)) |
|
1281 | ret.append(fmtline(l)) | |
1280 | ret.append('\n') |
|
1282 | ret.append('\n') | |
1281 | return ''.join(ret) |
|
1283 | return ''.join(ret) | |
1282 |
|
1284 | |||
1283 | class GitDiffRequired(Exception): |
|
1285 | class GitDiffRequired(Exception): | |
1284 | pass |
|
1286 | pass | |
1285 |
|
1287 | |||
1286 | def diffopts(ui, opts=None, untrusted=False): |
|
1288 | def diffopts(ui, opts=None, untrusted=False): | |
1287 | def get(key, name=None, getter=ui.configbool): |
|
1289 | def get(key, name=None, getter=ui.configbool): | |
1288 | return ((opts and opts.get(key)) or |
|
1290 | return ((opts and opts.get(key)) or | |
1289 | getter('diff', name or key, None, untrusted=untrusted)) |
|
1291 | getter('diff', name or key, None, untrusted=untrusted)) | |
1290 | return mdiff.diffopts( |
|
1292 | return mdiff.diffopts( | |
1291 | text=opts and opts.get('text'), |
|
1293 | text=opts and opts.get('text'), | |
1292 | git=get('git'), |
|
1294 | git=get('git'), | |
1293 | nodates=get('nodates'), |
|
1295 | nodates=get('nodates'), | |
1294 | showfunc=get('show_function', 'showfunc'), |
|
1296 | showfunc=get('show_function', 'showfunc'), | |
1295 | ignorews=get('ignore_all_space', 'ignorews'), |
|
1297 | ignorews=get('ignore_all_space', 'ignorews'), | |
1296 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), |
|
1298 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), | |
1297 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), |
|
1299 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), | |
1298 | context=get('unified', getter=ui.config)) |
|
1300 | context=get('unified', getter=ui.config)) | |
1299 |
|
1301 | |||
1300 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, |
|
1302 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, | |
1301 | losedatafn=None, prefix=''): |
|
1303 | losedatafn=None, prefix=''): | |
1302 | '''yields diff of changes to files between two nodes, or node and |
|
1304 | '''yields diff of changes to files between two nodes, or node and | |
1303 | working directory. |
|
1305 | working directory. | |
1304 |
|
1306 | |||
1305 | if node1 is None, use first dirstate parent instead. |
|
1307 | if node1 is None, use first dirstate parent instead. | |
1306 | if node2 is None, compare node1 with working directory. |
|
1308 | if node2 is None, compare node1 with working directory. | |
1307 |
|
1309 | |||
1308 | losedatafn(**kwarg) is a callable run when opts.upgrade=True and |
|
1310 | losedatafn(**kwarg) is a callable run when opts.upgrade=True and | |
1309 | every time some change cannot be represented with the current |
|
1311 | every time some change cannot be represented with the current | |
1310 | patch format. Return False to upgrade to git patch format, True to |
|
1312 | patch format. Return False to upgrade to git patch format, True to | |
1311 | accept the loss or raise an exception to abort the diff. It is |
|
1313 | accept the loss or raise an exception to abort the diff. It is | |
1312 | called with the name of current file being diffed as 'fn'. If set |
|
1314 | called with the name of current file being diffed as 'fn'. If set | |
1313 | to None, patches will always be upgraded to git format when |
|
1315 | to None, patches will always be upgraded to git format when | |
1314 | necessary. |
|
1316 | necessary. | |
1315 |
|
1317 | |||
1316 | prefix is a filename prefix that is prepended to all filenames on |
|
1318 | prefix is a filename prefix that is prepended to all filenames on | |
1317 | display (used for subrepos). |
|
1319 | display (used for subrepos). | |
1318 | ''' |
|
1320 | ''' | |
1319 |
|
1321 | |||
1320 | if opts is None: |
|
1322 | if opts is None: | |
1321 | opts = mdiff.defaultopts |
|
1323 | opts = mdiff.defaultopts | |
1322 |
|
1324 | |||
1323 | if not node1 and not node2: |
|
1325 | if not node1 and not node2: | |
1324 | node1 = repo.dirstate.parents()[0] |
|
1326 | node1 = repo.dirstate.parents()[0] | |
1325 |
|
1327 | |||
1326 | def lrugetfilectx(): |
|
1328 | def lrugetfilectx(): | |
1327 | cache = {} |
|
1329 | cache = {} | |
1328 | order = [] |
|
1330 | order = [] | |
1329 | def getfilectx(f, ctx): |
|
1331 | def getfilectx(f, ctx): | |
1330 | fctx = ctx.filectx(f, filelog=cache.get(f)) |
|
1332 | fctx = ctx.filectx(f, filelog=cache.get(f)) | |
1331 | if f not in cache: |
|
1333 | if f not in cache: | |
1332 | if len(cache) > 20: |
|
1334 | if len(cache) > 20: | |
1333 | del cache[order.pop(0)] |
|
1335 | del cache[order.pop(0)] | |
1334 | cache[f] = fctx.filelog() |
|
1336 | cache[f] = fctx.filelog() | |
1335 | else: |
|
1337 | else: | |
1336 | order.remove(f) |
|
1338 | order.remove(f) | |
1337 | order.append(f) |
|
1339 | order.append(f) | |
1338 | return fctx |
|
1340 | return fctx | |
1339 | return getfilectx |
|
1341 | return getfilectx | |
1340 | getfilectx = lrugetfilectx() |
|
1342 | getfilectx = lrugetfilectx() | |
1341 |
|
1343 | |||
1342 | ctx1 = repo[node1] |
|
1344 | ctx1 = repo[node1] | |
1343 | ctx2 = repo[node2] |
|
1345 | ctx2 = repo[node2] | |
1344 |
|
1346 | |||
1345 | if not changes: |
|
1347 | if not changes: | |
1346 | changes = repo.status(ctx1, ctx2, match=match) |
|
1348 | changes = repo.status(ctx1, ctx2, match=match) | |
1347 | modified, added, removed = changes[:3] |
|
1349 | modified, added, removed = changes[:3] | |
1348 |
|
1350 | |||
1349 | if not modified and not added and not removed: |
|
1351 | if not modified and not added and not removed: | |
1350 | return [] |
|
1352 | return [] | |
1351 |
|
1353 | |||
1352 | revs = None |
|
1354 | revs = None | |
1353 | if not repo.ui.quiet: |
|
1355 | if not repo.ui.quiet: | |
1354 | hexfunc = repo.ui.debugflag and hex or short |
|
1356 | hexfunc = repo.ui.debugflag and hex or short | |
1355 | revs = [hexfunc(node) for node in [node1, node2] if node] |
|
1357 | revs = [hexfunc(node) for node in [node1, node2] if node] | |
1356 |
|
1358 | |||
1357 | copy = {} |
|
1359 | copy = {} | |
1358 | if opts.git or opts.upgrade: |
|
1360 | if opts.git or opts.upgrade: | |
1359 | copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0] |
|
1361 | copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0] | |
1360 |
|
1362 | |||
1361 | difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2, |
|
1363 | difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2, | |
1362 | modified, added, removed, copy, getfilectx, opts, losedata, prefix) |
|
1364 | modified, added, removed, copy, getfilectx, opts, losedata, prefix) | |
1363 | if opts.upgrade and not opts.git: |
|
1365 | if opts.upgrade and not opts.git: | |
1364 | try: |
|
1366 | try: | |
1365 | def losedata(fn): |
|
1367 | def losedata(fn): | |
1366 | if not losedatafn or not losedatafn(fn=fn): |
|
1368 | if not losedatafn or not losedatafn(fn=fn): | |
1367 | raise GitDiffRequired() |
|
1369 | raise GitDiffRequired() | |
1368 | # Buffer the whole output until we are sure it can be generated |
|
1370 | # Buffer the whole output until we are sure it can be generated | |
1369 | return list(difffn(opts.copy(git=False), losedata)) |
|
1371 | return list(difffn(opts.copy(git=False), losedata)) | |
1370 | except GitDiffRequired: |
|
1372 | except GitDiffRequired: | |
1371 | return difffn(opts.copy(git=True), None) |
|
1373 | return difffn(opts.copy(git=True), None) | |
1372 | else: |
|
1374 | else: | |
1373 | return difffn(opts, None) |
|
1375 | return difffn(opts, None) | |
1374 |
|
1376 | |||
1375 | def difflabel(func, *args, **kw): |
|
1377 | def difflabel(func, *args, **kw): | |
1376 | '''yields 2-tuples of (output, label) based on the output of func()''' |
|
1378 | '''yields 2-tuples of (output, label) based on the output of func()''' | |
1377 | prefixes = [('diff', 'diff.diffline'), |
|
1379 | prefixes = [('diff', 'diff.diffline'), | |
1378 | ('copy', 'diff.extended'), |
|
1380 | ('copy', 'diff.extended'), | |
1379 | ('rename', 'diff.extended'), |
|
1381 | ('rename', 'diff.extended'), | |
1380 | ('old', 'diff.extended'), |
|
1382 | ('old', 'diff.extended'), | |
1381 | ('new', 'diff.extended'), |
|
1383 | ('new', 'diff.extended'), | |
1382 | ('deleted', 'diff.extended'), |
|
1384 | ('deleted', 'diff.extended'), | |
1383 | ('---', 'diff.file_a'), |
|
1385 | ('---', 'diff.file_a'), | |
1384 | ('+++', 'diff.file_b'), |
|
1386 | ('+++', 'diff.file_b'), | |
1385 | ('@@', 'diff.hunk'), |
|
1387 | ('@@', 'diff.hunk'), | |
1386 | ('-', 'diff.deleted'), |
|
1388 | ('-', 'diff.deleted'), | |
1387 | ('+', 'diff.inserted')] |
|
1389 | ('+', 'diff.inserted')] | |
1388 |
|
1390 | |||
1389 | for chunk in func(*args, **kw): |
|
1391 | for chunk in func(*args, **kw): | |
1390 | lines = chunk.split('\n') |
|
1392 | lines = chunk.split('\n') | |
1391 | for i, line in enumerate(lines): |
|
1393 | for i, line in enumerate(lines): | |
1392 | if i != 0: |
|
1394 | if i != 0: | |
1393 | yield ('\n', '') |
|
1395 | yield ('\n', '') | |
1394 | stripline = line |
|
1396 | stripline = line | |
1395 | if line and line[0] in '+-': |
|
1397 | if line and line[0] in '+-': | |
1396 | # highlight trailing whitespace, but only in changed lines |
|
1398 | # highlight trailing whitespace, but only in changed lines | |
1397 | stripline = line.rstrip() |
|
1399 | stripline = line.rstrip() | |
1398 | for prefix, label in prefixes: |
|
1400 | for prefix, label in prefixes: | |
1399 | if stripline.startswith(prefix): |
|
1401 | if stripline.startswith(prefix): | |
1400 | yield (stripline, label) |
|
1402 | yield (stripline, label) | |
1401 | break |
|
1403 | break | |
1402 | else: |
|
1404 | else: | |
1403 | yield (line, '') |
|
1405 | yield (line, '') | |
1404 | if line != stripline: |
|
1406 | if line != stripline: | |
1405 | yield (line[len(stripline):], 'diff.trailingwhitespace') |
|
1407 | yield (line[len(stripline):], 'diff.trailingwhitespace') | |
1406 |
|
1408 | |||
1407 | def diffui(*args, **kw): |
|
1409 | def diffui(*args, **kw): | |
1408 | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' |
|
1410 | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' | |
1409 | return difflabel(diff, *args, **kw) |
|
1411 | return difflabel(diff, *args, **kw) | |
1410 |
|
1412 | |||
1411 |
|
1413 | |||
1412 | def _addmodehdr(header, omode, nmode): |
|
1414 | def _addmodehdr(header, omode, nmode): | |
1413 | if omode != nmode: |
|
1415 | if omode != nmode: | |
1414 | header.append('old mode %s\n' % omode) |
|
1416 | header.append('old mode %s\n' % omode) | |
1415 | header.append('new mode %s\n' % nmode) |
|
1417 | header.append('new mode %s\n' % nmode) | |
1416 |
|
1418 | |||
1417 | def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1419 | def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, | |
1418 | copy, getfilectx, opts, losedatafn, prefix): |
|
1420 | copy, getfilectx, opts, losedatafn, prefix): | |
1419 |
|
1421 | |||
1420 | def join(f): |
|
1422 | def join(f): | |
1421 | return os.path.join(prefix, f) |
|
1423 | return os.path.join(prefix, f) | |
1422 |
|
1424 | |||
1423 | date1 = util.datestr(ctx1.date()) |
|
1425 | date1 = util.datestr(ctx1.date()) | |
1424 | man1 = ctx1.manifest() |
|
1426 | man1 = ctx1.manifest() | |
1425 |
|
1427 | |||
1426 | gone = set() |
|
1428 | gone = set() | |
1427 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} |
|
1429 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} | |
1428 |
|
1430 | |||
1429 | copyto = dict([(v, k) for k, v in copy.items()]) |
|
1431 | copyto = dict([(v, k) for k, v in copy.items()]) | |
1430 |
|
1432 | |||
1431 | if opts.git: |
|
1433 | if opts.git: | |
1432 | revs = None |
|
1434 | revs = None | |
1433 |
|
1435 | |||
1434 | for f in sorted(modified + added + removed): |
|
1436 | for f in sorted(modified + added + removed): | |
1435 | to = None |
|
1437 | to = None | |
1436 | tn = None |
|
1438 | tn = None | |
1437 | dodiff = True |
|
1439 | dodiff = True | |
1438 | header = [] |
|
1440 | header = [] | |
1439 | if f in man1: |
|
1441 | if f in man1: | |
1440 | to = getfilectx(f, ctx1).data() |
|
1442 | to = getfilectx(f, ctx1).data() | |
1441 | if f not in removed: |
|
1443 | if f not in removed: | |
1442 | tn = getfilectx(f, ctx2).data() |
|
1444 | tn = getfilectx(f, ctx2).data() | |
1443 | a, b = f, f |
|
1445 | a, b = f, f | |
1444 | if opts.git or losedatafn: |
|
1446 | if opts.git or losedatafn: | |
1445 | if f in added: |
|
1447 | if f in added: | |
1446 | mode = gitmode[ctx2.flags(f)] |
|
1448 | mode = gitmode[ctx2.flags(f)] | |
1447 | if f in copy or f in copyto: |
|
1449 | if f in copy or f in copyto: | |
1448 | if opts.git: |
|
1450 | if opts.git: | |
1449 | if f in copy: |
|
1451 | if f in copy: | |
1450 | a = copy[f] |
|
1452 | a = copy[f] | |
1451 | else: |
|
1453 | else: | |
1452 | a = copyto[f] |
|
1454 | a = copyto[f] | |
1453 | omode = gitmode[man1.flags(a)] |
|
1455 | omode = gitmode[man1.flags(a)] | |
1454 | _addmodehdr(header, omode, mode) |
|
1456 | _addmodehdr(header, omode, mode) | |
1455 | if a in removed and a not in gone: |
|
1457 | if a in removed and a not in gone: | |
1456 | op = 'rename' |
|
1458 | op = 'rename' | |
1457 | gone.add(a) |
|
1459 | gone.add(a) | |
1458 | else: |
|
1460 | else: | |
1459 | op = 'copy' |
|
1461 | op = 'copy' | |
1460 | header.append('%s from %s\n' % (op, join(a))) |
|
1462 | header.append('%s from %s\n' % (op, join(a))) | |
1461 | header.append('%s to %s\n' % (op, join(f))) |
|
1463 | header.append('%s to %s\n' % (op, join(f))) | |
1462 | to = getfilectx(a, ctx1).data() |
|
1464 | to = getfilectx(a, ctx1).data() | |
1463 | else: |
|
1465 | else: | |
1464 | losedatafn(f) |
|
1466 | losedatafn(f) | |
1465 | else: |
|
1467 | else: | |
1466 | if opts.git: |
|
1468 | if opts.git: | |
1467 | header.append('new file mode %s\n' % mode) |
|
1469 | header.append('new file mode %s\n' % mode) | |
1468 | elif ctx2.flags(f): |
|
1470 | elif ctx2.flags(f): | |
1469 | losedatafn(f) |
|
1471 | losedatafn(f) | |
1470 | # In theory, if tn was copied or renamed we should check |
|
1472 | # In theory, if tn was copied or renamed we should check | |
1471 | # if the source is binary too but the copy record already |
|
1473 | # if the source is binary too but the copy record already | |
1472 | # forces git mode. |
|
1474 | # forces git mode. | |
1473 | if util.binary(tn): |
|
1475 | if util.binary(tn): | |
1474 | if opts.git: |
|
1476 | if opts.git: | |
1475 | dodiff = 'binary' |
|
1477 | dodiff = 'binary' | |
1476 | else: |
|
1478 | else: | |
1477 | losedatafn(f) |
|
1479 | losedatafn(f) | |
1478 | if not opts.git and not tn: |
|
1480 | if not opts.git and not tn: | |
1479 | # regular diffs cannot represent new empty file |
|
1481 | # regular diffs cannot represent new empty file | |
1480 | losedatafn(f) |
|
1482 | losedatafn(f) | |
1481 | elif f in removed: |
|
1483 | elif f in removed: | |
1482 | if opts.git: |
|
1484 | if opts.git: | |
1483 | # have we already reported a copy above? |
|
1485 | # have we already reported a copy above? | |
1484 | if ((f in copy and copy[f] in added |
|
1486 | if ((f in copy and copy[f] in added | |
1485 | and copyto[copy[f]] == f) or |
|
1487 | and copyto[copy[f]] == f) or | |
1486 | (f in copyto and copyto[f] in added |
|
1488 | (f in copyto and copyto[f] in added | |
1487 | and copy[copyto[f]] == f)): |
|
1489 | and copy[copyto[f]] == f)): | |
1488 | dodiff = False |
|
1490 | dodiff = False | |
1489 | else: |
|
1491 | else: | |
1490 | header.append('deleted file mode %s\n' % |
|
1492 | header.append('deleted file mode %s\n' % | |
1491 | gitmode[man1.flags(f)]) |
|
1493 | gitmode[man1.flags(f)]) | |
1492 | elif not to or util.binary(to): |
|
1494 | elif not to or util.binary(to): | |
1493 | # regular diffs cannot represent empty file deletion |
|
1495 | # regular diffs cannot represent empty file deletion | |
1494 | losedatafn(f) |
|
1496 | losedatafn(f) | |
1495 | else: |
|
1497 | else: | |
1496 | oflag = man1.flags(f) |
|
1498 | oflag = man1.flags(f) | |
1497 | nflag = ctx2.flags(f) |
|
1499 | nflag = ctx2.flags(f) | |
1498 | binary = util.binary(to) or util.binary(tn) |
|
1500 | binary = util.binary(to) or util.binary(tn) | |
1499 | if opts.git: |
|
1501 | if opts.git: | |
1500 | _addmodehdr(header, gitmode[oflag], gitmode[nflag]) |
|
1502 | _addmodehdr(header, gitmode[oflag], gitmode[nflag]) | |
1501 | if binary: |
|
1503 | if binary: | |
1502 | dodiff = 'binary' |
|
1504 | dodiff = 'binary' | |
1503 | elif binary or nflag != oflag: |
|
1505 | elif binary or nflag != oflag: | |
1504 | losedatafn(f) |
|
1506 | losedatafn(f) | |
1505 | if opts.git: |
|
1507 | if opts.git: | |
1506 | header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) |
|
1508 | header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) | |
1507 |
|
1509 | |||
1508 | if dodiff: |
|
1510 | if dodiff: | |
1509 | if dodiff == 'binary': |
|
1511 | if dodiff == 'binary': | |
1510 | text = b85diff(to, tn) |
|
1512 | text = b85diff(to, tn) | |
1511 | else: |
|
1513 | else: | |
1512 | text = mdiff.unidiff(to, date1, |
|
1514 | text = mdiff.unidiff(to, date1, | |
1513 | # ctx2 date may be dynamic |
|
1515 | # ctx2 date may be dynamic | |
1514 | tn, util.datestr(ctx2.date()), |
|
1516 | tn, util.datestr(ctx2.date()), | |
1515 | join(a), join(b), revs, opts=opts) |
|
1517 | join(a), join(b), revs, opts=opts) | |
1516 | if header and (text or len(header) > 1): |
|
1518 | if header and (text or len(header) > 1): | |
1517 | yield ''.join(header) |
|
1519 | yield ''.join(header) | |
1518 | if text: |
|
1520 | if text: | |
1519 | yield text |
|
1521 | yield text | |
1520 |
|
1522 | |||
1521 | def diffstatdata(lines): |
|
1523 | def diffstatdata(lines): | |
1522 | filename, adds, removes = None, 0, 0 |
|
1524 | filename, adds, removes = None, 0, 0 | |
1523 | for line in lines: |
|
1525 | for line in lines: | |
1524 | if line.startswith('diff'): |
|
1526 | if line.startswith('diff'): | |
1525 | if filename: |
|
1527 | if filename: | |
1526 | isbinary = adds == 0 and removes == 0 |
|
1528 | isbinary = adds == 0 and removes == 0 | |
1527 | yield (filename, adds, removes, isbinary) |
|
1529 | yield (filename, adds, removes, isbinary) | |
1528 | # set numbers to 0 anyway when starting new file |
|
1530 | # set numbers to 0 anyway when starting new file | |
1529 | adds, removes = 0, 0 |
|
1531 | adds, removes = 0, 0 | |
1530 | if line.startswith('diff --git'): |
|
1532 | if line.startswith('diff --git'): | |
1531 | filename = gitre.search(line).group(1) |
|
1533 | filename = gitre.search(line).group(1) | |
1532 | else: |
|
1534 | else: | |
1533 | # format: "diff -r ... -r ... filename" |
|
1535 | # format: "diff -r ... -r ... filename" | |
1534 | filename = line.split(None, 5)[-1] |
|
1536 | filename = line.split(None, 5)[-1] | |
1535 | elif line.startswith('+') and not line.startswith('+++'): |
|
1537 | elif line.startswith('+') and not line.startswith('+++'): | |
1536 | adds += 1 |
|
1538 | adds += 1 | |
1537 | elif line.startswith('-') and not line.startswith('---'): |
|
1539 | elif line.startswith('-') and not line.startswith('---'): | |
1538 | removes += 1 |
|
1540 | removes += 1 | |
1539 | if filename: |
|
1541 | if filename: | |
1540 | isbinary = adds == 0 and removes == 0 |
|
1542 | isbinary = adds == 0 and removes == 0 | |
1541 | yield (filename, adds, removes, isbinary) |
|
1543 | yield (filename, adds, removes, isbinary) | |
1542 |
|
1544 | |||
1543 | def diffstat(lines, width=80, git=False): |
|
1545 | def diffstat(lines, width=80, git=False): | |
1544 | output = [] |
|
1546 | output = [] | |
1545 | stats = list(diffstatdata(lines)) |
|
1547 | stats = list(diffstatdata(lines)) | |
1546 |
|
1548 | |||
1547 | maxtotal, maxname = 0, 0 |
|
1549 | maxtotal, maxname = 0, 0 | |
1548 | totaladds, totalremoves = 0, 0 |
|
1550 | totaladds, totalremoves = 0, 0 | |
1549 | hasbinary = False |
|
1551 | hasbinary = False | |
1550 |
|
1552 | |||
1551 | sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename)) |
|
1553 | sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename)) | |
1552 | for filename, adds, removes, isbinary in stats] |
|
1554 | for filename, adds, removes, isbinary in stats] | |
1553 |
|
1555 | |||
1554 | for filename, adds, removes, isbinary, namewidth in sized: |
|
1556 | for filename, adds, removes, isbinary, namewidth in sized: | |
1555 | totaladds += adds |
|
1557 | totaladds += adds | |
1556 | totalremoves += removes |
|
1558 | totalremoves += removes | |
1557 | maxname = max(maxname, namewidth) |
|
1559 | maxname = max(maxname, namewidth) | |
1558 | maxtotal = max(maxtotal, adds + removes) |
|
1560 | maxtotal = max(maxtotal, adds + removes) | |
1559 | if isbinary: |
|
1561 | if isbinary: | |
1560 | hasbinary = True |
|
1562 | hasbinary = True | |
1561 |
|
1563 | |||
1562 | countwidth = len(str(maxtotal)) |
|
1564 | countwidth = len(str(maxtotal)) | |
1563 | if hasbinary and countwidth < 3: |
|
1565 | if hasbinary and countwidth < 3: | |
1564 | countwidth = 3 |
|
1566 | countwidth = 3 | |
1565 | graphwidth = width - countwidth - maxname - 6 |
|
1567 | graphwidth = width - countwidth - maxname - 6 | |
1566 | if graphwidth < 10: |
|
1568 | if graphwidth < 10: | |
1567 | graphwidth = 10 |
|
1569 | graphwidth = 10 | |
1568 |
|
1570 | |||
1569 | def scale(i): |
|
1571 | def scale(i): | |
1570 | if maxtotal <= graphwidth: |
|
1572 | if maxtotal <= graphwidth: | |
1571 | return i |
|
1573 | return i | |
1572 | # If diffstat runs out of room it doesn't print anything, |
|
1574 | # If diffstat runs out of room it doesn't print anything, | |
1573 | # which isn't very useful, so always print at least one + or - |
|
1575 | # which isn't very useful, so always print at least one + or - | |
1574 | # if there were at least some changes. |
|
1576 | # if there were at least some changes. | |
1575 | return max(i * graphwidth // maxtotal, int(bool(i))) |
|
1577 | return max(i * graphwidth // maxtotal, int(bool(i))) | |
1576 |
|
1578 | |||
1577 | for filename, adds, removes, isbinary, namewidth in sized: |
|
1579 | for filename, adds, removes, isbinary, namewidth in sized: | |
1578 | if git and isbinary: |
|
1580 | if git and isbinary: | |
1579 | count = 'Bin' |
|
1581 | count = 'Bin' | |
1580 | else: |
|
1582 | else: | |
1581 | count = adds + removes |
|
1583 | count = adds + removes | |
1582 | pluses = '+' * scale(adds) |
|
1584 | pluses = '+' * scale(adds) | |
1583 | minuses = '-' * scale(removes) |
|
1585 | minuses = '-' * scale(removes) | |
1584 | output.append(' %s%s | %*s %s%s\n' % |
|
1586 | output.append(' %s%s | %*s %s%s\n' % | |
1585 | (filename, ' ' * (maxname - namewidth), |
|
1587 | (filename, ' ' * (maxname - namewidth), | |
1586 | countwidth, count, |
|
1588 | countwidth, count, | |
1587 | pluses, minuses)) |
|
1589 | pluses, minuses)) | |
1588 |
|
1590 | |||
1589 | if stats: |
|
1591 | if stats: | |
1590 | output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n') |
|
1592 | output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n') | |
1591 | % (len(stats), totaladds, totalremoves)) |
|
1593 | % (len(stats), totaladds, totalremoves)) | |
1592 |
|
1594 | |||
1593 | return ''.join(output) |
|
1595 | return ''.join(output) | |
1594 |
|
1596 | |||
1595 | def diffstatui(*args, **kw): |
|
1597 | def diffstatui(*args, **kw): | |
1596 | '''like diffstat(), but yields 2-tuples of (output, label) for |
|
1598 | '''like diffstat(), but yields 2-tuples of (output, label) for | |
1597 | ui.write() |
|
1599 | ui.write() | |
1598 | ''' |
|
1600 | ''' | |
1599 |
|
1601 | |||
1600 | for line in diffstat(*args, **kw).splitlines(): |
|
1602 | for line in diffstat(*args, **kw).splitlines(): | |
1601 | if line and line[-1] in '+-': |
|
1603 | if line and line[-1] in '+-': | |
1602 | name, graph = line.rsplit(' ', 1) |
|
1604 | name, graph = line.rsplit(' ', 1) | |
1603 | yield (name + ' ', '') |
|
1605 | yield (name + ' ', '') | |
1604 | m = re.search(r'\++', graph) |
|
1606 | m = re.search(r'\++', graph) | |
1605 | if m: |
|
1607 | if m: | |
1606 | yield (m.group(0), 'diffstat.inserted') |
|
1608 | yield (m.group(0), 'diffstat.inserted') | |
1607 | m = re.search(r'-+', graph) |
|
1609 | m = re.search(r'-+', graph) | |
1608 | if m: |
|
1610 | if m: | |
1609 | yield (m.group(0), 'diffstat.deleted') |
|
1611 | yield (m.group(0), 'diffstat.deleted') | |
1610 | else: |
|
1612 | else: | |
1611 | yield (line, '') |
|
1613 | yield (line, '') | |
1612 | yield ('\n', '') |
|
1614 | yield ('\n', '') |
@@ -1,143 +1,207 b'' | |||||
1 |
|
1 | |||
2 | Test interactions between mq and patch.eol |
|
2 | Test interactions between mq and patch.eol | |
3 |
|
3 | |||
4 |
|
4 | |||
5 | $ echo "[extensions]" >> $HGRCPATH |
|
5 | $ echo "[extensions]" >> $HGRCPATH | |
6 | $ echo "mq=" >> $HGRCPATH |
|
6 | $ echo "mq=" >> $HGRCPATH | |
7 | $ echo "[diff]" >> $HGRCPATH |
|
7 | $ echo "[diff]" >> $HGRCPATH | |
8 | $ echo "nodates=1" >> $HGRCPATH |
|
8 | $ echo "nodates=1" >> $HGRCPATH | |
9 |
|
9 | |||
10 | $ cat > makepatch.py <<EOF |
|
10 | $ cat > makepatch.py <<EOF | |
11 | > f = file('eol.diff', 'wb') |
|
11 | > f = file('eol.diff', 'wb') | |
12 | > w = f.write |
|
12 | > w = f.write | |
13 | > w('test message\n') |
|
13 | > w('test message\n') | |
14 | > w('diff --git a/a b/a\n') |
|
14 | > w('diff --git a/a b/a\n') | |
15 | > w('--- a/a\n') |
|
15 | > w('--- a/a\n') | |
16 | > w('+++ b/a\n') |
|
16 | > w('+++ b/a\n') | |
17 | > w('@@ -1,5 +1,5 @@\n') |
|
17 | > w('@@ -1,5 +1,5 @@\n') | |
18 | > w(' a\n') |
|
18 | > w(' a\n') | |
19 | > w('-b\r\n') |
|
19 | > w('-b\r\n') | |
20 | > w('+y\r\n') |
|
20 | > w('+y\r\n') | |
21 | > w(' c\r\n') |
|
21 | > w(' c\r\n') | |
22 | > w(' d\n') |
|
22 | > w(' d\n') | |
23 | > w('-e\n') |
|
23 | > w('-e\n') | |
24 | > w('\ No newline at end of file\n') |
|
24 | > w('\ No newline at end of file\n') | |
25 | > w('+z\r\n') |
|
25 | > w('+z\r\n') | |
26 | > w('\ No newline at end of file\r\n') |
|
26 | > w('\ No newline at end of file\r\n') | |
27 | > EOF |
|
27 | > EOF | |
28 |
|
28 | |||
29 | $ cat > cateol.py <<EOF |
|
29 | $ cat > cateol.py <<EOF | |
30 | > import sys |
|
30 | > import sys | |
31 | > for line in file(sys.argv[1], 'rb'): |
|
31 | > for line in file(sys.argv[1], 'rb'): | |
32 | > line = line.replace('\r', '<CR>') |
|
32 | > line = line.replace('\r', '<CR>') | |
33 | > line = line.replace('\n', '<LF>') |
|
33 | > line = line.replace('\n', '<LF>') | |
34 | > print line |
|
34 | > print line | |
35 | > EOF |
|
35 | > EOF | |
36 |
|
36 | |||
37 | $ hg init repo |
|
37 | $ hg init repo | |
38 | $ cd repo |
|
38 | $ cd repo | |
39 | $ echo '\.diff' > .hgignore |
|
39 | $ echo '\.diff' > .hgignore | |
40 | $ echo '\.rej' >> .hgignore |
|
40 | $ echo '\.rej' >> .hgignore | |
41 |
|
41 | |||
42 |
|
42 | |||
43 | Test different --eol values |
|
43 | Test different --eol values | |
44 |
|
44 | |||
45 | $ python -c 'file("a", "wb").write("a\nb\nc\nd\ne")' |
|
45 | $ python -c 'file("a", "wb").write("a\nb\nc\nd\ne")' | |
46 | $ hg ci -Am adda |
|
46 | $ hg ci -Am adda | |
47 | adding .hgignore |
|
47 | adding .hgignore | |
48 | adding a |
|
48 | adding a | |
49 | $ python ../makepatch.py |
|
49 | $ python ../makepatch.py | |
50 | $ hg qimport eol.diff |
|
50 | $ hg qimport eol.diff | |
51 | adding eol.diff to series file |
|
51 | adding eol.diff to series file | |
52 |
|
52 | |||
53 | should fail in strict mode |
|
53 | should fail in strict mode | |
54 |
|
54 | |||
55 | $ hg qpush |
|
55 | $ hg qpush | |
56 | applying eol.diff |
|
56 | applying eol.diff | |
57 | patching file a |
|
57 | patching file a | |
58 | Hunk #1 FAILED at 0 |
|
58 | Hunk #1 FAILED at 0 | |
59 | 1 out of 1 hunks FAILED -- saving rejects to file a.rej |
|
59 | 1 out of 1 hunks FAILED -- saving rejects to file a.rej | |
60 | patch failed, unable to continue (try -v) |
|
60 | patch failed, unable to continue (try -v) | |
61 | patch failed, rejects left in working dir |
|
61 | patch failed, rejects left in working dir | |
62 | errors during apply, please fix and refresh eol.diff |
|
62 | errors during apply, please fix and refresh eol.diff | |
63 | [2] |
|
63 | [2] | |
64 | $ hg qpop |
|
64 | $ hg qpop | |
65 | popping eol.diff |
|
65 | popping eol.diff | |
66 | patch queue now empty |
|
66 | patch queue now empty | |
67 |
|
67 | |||
68 | invalid eol |
|
68 | invalid eol | |
69 |
|
69 | |||
70 | $ hg --config patch.eol='LFCR' qpush |
|
70 | $ hg --config patch.eol='LFCR' qpush | |
71 | applying eol.diff |
|
71 | applying eol.diff | |
72 | patch failed, unable to continue (try -v) |
|
72 | patch failed, unable to continue (try -v) | |
73 | patch failed, rejects left in working dir |
|
73 | patch failed, rejects left in working dir | |
74 | errors during apply, please fix and refresh eol.diff |
|
74 | errors during apply, please fix and refresh eol.diff | |
75 | [2] |
|
75 | [2] | |
76 | $ hg qpop |
|
76 | $ hg qpop | |
77 | popping eol.diff |
|
77 | popping eol.diff | |
78 | patch queue now empty |
|
78 | patch queue now empty | |
79 |
|
79 | |||
80 | force LF |
|
80 | force LF | |
81 |
|
81 | |||
82 | $ hg --config patch.eol='CRLF' qpush |
|
82 | $ hg --config patch.eol='CRLF' qpush | |
83 | applying eol.diff |
|
83 | applying eol.diff | |
84 | now at: eol.diff |
|
84 | now at: eol.diff | |
85 | $ hg qrefresh |
|
85 | $ hg qrefresh | |
86 | $ python ../cateol.py .hg/patches/eol.diff |
|
86 | $ python ../cateol.py .hg/patches/eol.diff | |
87 | test message<LF> |
|
87 | test message<LF> | |
88 | <LF> |
|
88 | <LF> | |
89 | diff -r 0d0bf99a8b7a a<LF> |
|
89 | diff -r 0d0bf99a8b7a a<LF> | |
90 | --- a/a<LF> |
|
90 | --- a/a<LF> | |
91 | +++ b/a<LF> |
|
91 | +++ b/a<LF> | |
92 | @@ -1,5 +1,5 @@<LF> |
|
92 | @@ -1,5 +1,5 @@<LF> | |
93 | -a<LF> |
|
93 | -a<LF> | |
94 | -b<LF> |
|
94 | -b<LF> | |
95 | -c<LF> |
|
95 | -c<LF> | |
96 | -d<LF> |
|
96 | -d<LF> | |
97 | -e<LF> |
|
97 | -e<LF> | |
98 | \ No newline at end of file<LF> |
|
98 | \ No newline at end of file<LF> | |
99 | +a<CR><LF> |
|
99 | +a<CR><LF> | |
100 | +y<CR><LF> |
|
100 | +y<CR><LF> | |
101 | +c<CR><LF> |
|
101 | +c<CR><LF> | |
102 | +d<CR><LF> |
|
102 | +d<CR><LF> | |
103 | +z<LF> |
|
103 | +z<LF> | |
104 | \ No newline at end of file<LF> |
|
104 | \ No newline at end of file<LF> | |
105 | $ python ../cateol.py a |
|
105 | $ python ../cateol.py a | |
106 | a<CR><LF> |
|
106 | a<CR><LF> | |
107 | y<CR><LF> |
|
107 | y<CR><LF> | |
108 | c<CR><LF> |
|
108 | c<CR><LF> | |
109 | d<CR><LF> |
|
109 | d<CR><LF> | |
110 | z |
|
110 | z | |
111 | $ hg qpop |
|
111 | $ hg qpop | |
112 | popping eol.diff |
|
112 | popping eol.diff | |
113 | patch queue now empty |
|
113 | patch queue now empty | |
114 |
|
114 | |||
115 | push again forcing LF and compare revisions |
|
115 | push again forcing LF and compare revisions | |
116 |
|
116 | |||
117 | $ hg --config patch.eol='CRLF' qpush |
|
117 | $ hg --config patch.eol='CRLF' qpush | |
118 | applying eol.diff |
|
118 | applying eol.diff | |
119 | now at: eol.diff |
|
119 | now at: eol.diff | |
120 | $ python ../cateol.py a |
|
120 | $ python ../cateol.py a | |
121 | a<CR><LF> |
|
121 | a<CR><LF> | |
122 | y<CR><LF> |
|
122 | y<CR><LF> | |
123 | c<CR><LF> |
|
123 | c<CR><LF> | |
124 | d<CR><LF> |
|
124 | d<CR><LF> | |
125 | z |
|
125 | z | |
126 | $ hg qpop |
|
126 | $ hg qpop | |
127 | popping eol.diff |
|
127 | popping eol.diff | |
128 | patch queue now empty |
|
128 | patch queue now empty | |
129 |
|
129 | |||
130 | push again without LF and compare revisions |
|
130 | push again without LF and compare revisions | |
131 |
|
131 | |||
132 | $ hg qpush |
|
132 | $ hg qpush | |
133 | applying eol.diff |
|
133 | applying eol.diff | |
134 | now at: eol.diff |
|
134 | now at: eol.diff | |
135 | $ python ../cateol.py a |
|
135 | $ python ../cateol.py a | |
136 | a<CR><LF> |
|
136 | a<CR><LF> | |
137 | y<CR><LF> |
|
137 | y<CR><LF> | |
138 | c<CR><LF> |
|
138 | c<CR><LF> | |
139 | d<CR><LF> |
|
139 | d<CR><LF> | |
140 | z |
|
140 | z | |
141 | $ hg qpop |
|
141 | $ hg qpop | |
142 | popping eol.diff |
|
142 | popping eol.diff | |
143 | patch queue now empty |
|
143 | patch queue now empty | |
|
144 | $ cd .. | |||
|
145 | ||||
|
146 | ||||
|
147 | Test .rej file EOL are left unchanged | |||
|
148 | ||||
|
149 | $ hg init testeol | |||
|
150 | $ cd testeol | |||
|
151 | $ python -c "file('a', 'wb').write('1\r\n2\r\n3\r\n4')" | |||
|
152 | $ hg ci -Am adda | |||
|
153 | adding a | |||
|
154 | $ python -c "file('a', 'wb').write('1\r\n2\r\n33\r\n4')" | |||
|
155 | $ hg qnew patch1 | |||
|
156 | $ hg qpop | |||
|
157 | popping patch1 | |||
|
158 | patch queue now empty | |||
|
159 | $ python -c "file('a', 'wb').write('1\r\n22\r\n33\r\n4')" | |||
|
160 | $ hg ci -m changea | |||
|
161 | ||||
|
162 | $ hg --config 'patch.eol=LF' qpush | |||
|
163 | applying patch1 | |||
|
164 | patching file a | |||
|
165 | Hunk #1 FAILED at 0 | |||
|
166 | 1 out of 1 hunks FAILED -- saving rejects to file a.rej | |||
|
167 | patch failed, unable to continue (try -v) | |||
|
168 | patch failed, rejects left in working dir | |||
|
169 | errors during apply, please fix and refresh patch1 | |||
|
170 | [2] | |||
|
171 | $ hg qpop | |||
|
172 | popping patch1 | |||
|
173 | patch queue now empty | |||
|
174 | $ cat a.rej | |||
|
175 | --- a | |||
|
176 | +++ a | |||
|
177 | @@ -1,4 +1,4 @@ | |||
|
178 | 1\r (esc) | |||
|
179 | 2\r (esc) | |||
|
180 | -3\r (esc) | |||
|
181 | +33\r (esc) | |||
|
182 | 4 | |||
|
183 | \ No newline at end of file | |||
|
184 | ||||
|
185 | $ hg --config 'patch.eol=auto' qpush | |||
|
186 | applying patch1 | |||
|
187 | patching file a | |||
|
188 | Hunk #1 FAILED at 0 | |||
|
189 | 1 out of 1 hunks FAILED -- saving rejects to file a.rej | |||
|
190 | patch failed, unable to continue (try -v) | |||
|
191 | patch failed, rejects left in working dir | |||
|
192 | errors during apply, please fix and refresh patch1 | |||
|
193 | [2] | |||
|
194 | $ hg qpop | |||
|
195 | popping patch1 | |||
|
196 | patch queue now empty | |||
|
197 | $ cat a.rej | |||
|
198 | --- a | |||
|
199 | +++ a | |||
|
200 | @@ -1,4 +1,4 @@ | |||
|
201 | 1\r (esc) | |||
|
202 | 2\r (esc) | |||
|
203 | -3\r (esc) | |||
|
204 | +33\r (esc) | |||
|
205 | 4 | |||
|
206 | \ No newline at end of file | |||
|
207 | $ cd .. |
General Comments 0
You need to be logged in to leave comments.
Login now