Show More
@@ -0,0 +1,1 b'' | |||||
|
1 | test@test.com No newline at end of file |
@@ -0,0 +1,59 b'' | |||||
|
1 | # Binary file regexps: | |||
|
2 | \.png$ | |||
|
3 | \.PNG$ | |||
|
4 | \.gz$ | |||
|
5 | \.GZ$ | |||
|
6 | \.pdf$ | |||
|
7 | \.PDF$ | |||
|
8 | \.jpg$ | |||
|
9 | \.JPG$ | |||
|
10 | \.jpeg$ | |||
|
11 | \.JPEG$ | |||
|
12 | \.gif$ | |||
|
13 | \.GIF$ | |||
|
14 | \.tif$ | |||
|
15 | \.TIF$ | |||
|
16 | \.tiff$ | |||
|
17 | \.TIFF$ | |||
|
18 | \.pnm$ | |||
|
19 | \.PNM$ | |||
|
20 | \.pbm$ | |||
|
21 | \.PBM$ | |||
|
22 | \.pgm$ | |||
|
23 | \.PGM$ | |||
|
24 | \.ppm$ | |||
|
25 | \.PPM$ | |||
|
26 | \.bmp$ | |||
|
27 | \.BMP$ | |||
|
28 | \.mng$ | |||
|
29 | \.MNG$ | |||
|
30 | \.tar$ | |||
|
31 | \.TAR$ | |||
|
32 | \.bz2$ | |||
|
33 | \.BZ2$ | |||
|
34 | \.z$ | |||
|
35 | \.Z$ | |||
|
36 | \.zip$ | |||
|
37 | \.ZIP$ | |||
|
38 | \.jar$ | |||
|
39 | \.JAR$ | |||
|
40 | \.so$ | |||
|
41 | \.SO$ | |||
|
42 | \.a$ | |||
|
43 | \.A$ | |||
|
44 | \.tgz$ | |||
|
45 | \.TGZ$ | |||
|
46 | \.mpg$ | |||
|
47 | \.MPG$ | |||
|
48 | \.mpeg$ | |||
|
49 | \.MPEG$ | |||
|
50 | \.iso$ | |||
|
51 | \.ISO$ | |||
|
52 | \.exe$ | |||
|
53 | \.EXE$ | |||
|
54 | \.doc$ | |||
|
55 | \.DOC$ | |||
|
56 | \.elc$ | |||
|
57 | \.ELC$ | |||
|
58 | \.pyc$ | |||
|
59 | \.PYC$ |
@@ -0,0 +1,49 b'' | |||||
|
1 | # Boring file regexps: | |||
|
2 | \.hi$ | |||
|
3 | \.hi-boot$ | |||
|
4 | \.o-boot$ | |||
|
5 | \.o$ | |||
|
6 | \.o\.cmd$ | |||
|
7 | # *.ko files aren't boring by default because they might | |||
|
8 | # be Korean translations rather than kernel modules. | |||
|
9 | # \.ko$ | |||
|
10 | \.ko\.cmd$ | |||
|
11 | \.mod\.c$ | |||
|
12 | (^|/)\.tmp_versions($|/) | |||
|
13 | (^|/)CVS($|/) | |||
|
14 | \.cvsignore$ | |||
|
15 | ^\.# | |||
|
16 | (^|/)RCS($|/) | |||
|
17 | ,v$ | |||
|
18 | (^|/)\.svn($|/) | |||
|
19 | \.bzr$ | |||
|
20 | (^|/)SCCS($|/) | |||
|
21 | ~$ | |||
|
22 | (^|/)_darcs($|/) | |||
|
23 | \.bak$ | |||
|
24 | \.BAK$ | |||
|
25 | \.orig$ | |||
|
26 | \.rej$ | |||
|
27 | (^|/)vssver\.scc$ | |||
|
28 | \.swp$ | |||
|
29 | (^|/)MT($|/) | |||
|
30 | (^|/)\{arch\}($|/) | |||
|
31 | (^|/).arch-ids($|/) | |||
|
32 | (^|/), | |||
|
33 | \.prof$ | |||
|
34 | (^|/)\.DS_Store$ | |||
|
35 | (^|/)BitKeeper($|/) | |||
|
36 | (^|/)ChangeSet($|/) | |||
|
37 | \.py[co]$ | |||
|
38 | \.elc$ | |||
|
39 | \.class$ | |||
|
40 | \# | |||
|
41 | (^|/)Thumbs\.db$ | |||
|
42 | (^|/)autom4te\.cache($|/) | |||
|
43 | (^|/)config\.(log|status)$ | |||
|
44 | ^\.depend$ | |||
|
45 | (^|/)(tags|TAGS)$ | |||
|
46 | #(^|/)\.[^/] | |||
|
47 | (^|/|\.)core$ | |||
|
48 | \.(obj|a|exe|so|lo|la)$ | |||
|
49 | ^\.darcs-temp-mail$ |
1 | NO CONTENT: new file 100644 |
|
NO CONTENT: new file 100644 |
@@ -0,0 +1,1 b'' | |||||
|
1 | a |
@@ -0,0 +1,1 b'' | |||||
|
1 | a |
@@ -1,545 +1,549 b'' | |||||
1 | # Mercurial extension to provide the 'hg bookmark' command |
|
1 | # Mercurial extension to provide the 'hg bookmark' command | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 David Soria Parra <dsp@php.net> |
|
3 | # Copyright 2008 David Soria Parra <dsp@php.net> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''track a line of development with movable markers |
|
8 | '''track a line of development with movable markers | |
9 |
|
9 | |||
10 | Bookmarks are local movable markers to changesets. Every bookmark |
|
10 | Bookmarks are local movable markers to changesets. Every bookmark | |
11 | points to a changeset identified by its hash. If you commit a |
|
11 | points to a changeset identified by its hash. If you commit a | |
12 | changeset that is based on a changeset that has a bookmark on it, the |
|
12 | changeset that is based on a changeset that has a bookmark on it, the | |
13 | bookmark shifts to the new changeset. |
|
13 | bookmark shifts to the new changeset. | |
14 |
|
14 | |||
15 | It is possible to use bookmark names in every revision lookup (e.g. |
|
15 | It is possible to use bookmark names in every revision lookup (e.g. | |
16 | :hg:`merge`, :hg:`update`). |
|
16 | :hg:`merge`, :hg:`update`). | |
17 |
|
17 | |||
18 | By default, when several bookmarks point to the same changeset, they |
|
18 | By default, when several bookmarks point to the same changeset, they | |
19 | will all move forward together. It is possible to obtain a more |
|
19 | will all move forward together. It is possible to obtain a more | |
20 | git-like experience by adding the following configuration option to |
|
20 | git-like experience by adding the following configuration option to | |
21 | your configuration file:: |
|
21 | your configuration file:: | |
22 |
|
22 | |||
23 | [bookmarks] |
|
23 | [bookmarks] | |
24 | track.current = True |
|
24 | track.current = True | |
25 |
|
25 | |||
26 | This will cause Mercurial to track the bookmark that you are currently |
|
26 | This will cause Mercurial to track the bookmark that you are currently | |
27 | using, and only update it. This is similar to git's approach to |
|
27 | using, and only update it. This is similar to git's approach to | |
28 | branching. |
|
28 | branching. | |
29 | ''' |
|
29 | ''' | |
30 |
|
30 | |||
31 | from mercurial.i18n import _ |
|
31 | from mercurial.i18n import _ | |
32 | from mercurial.node import nullid, nullrev, hex, short |
|
32 | from mercurial.node import nullid, nullrev, hex, short | |
33 | from mercurial import util, commands, repair, extensions, pushkey, hg, url |
|
33 | from mercurial import util, commands, repair, extensions, pushkey, hg, url | |
34 | import os |
|
34 | import os | |
35 |
|
35 | |||
36 | def write(repo): |
|
36 | def write(repo): | |
37 | '''Write bookmarks |
|
37 | '''Write bookmarks | |
38 |
|
38 | |||
39 | Write the given bookmark => hash dictionary to the .hg/bookmarks file |
|
39 | Write the given bookmark => hash dictionary to the .hg/bookmarks file | |
40 | in a format equal to those of localtags. |
|
40 | in a format equal to those of localtags. | |
41 |
|
41 | |||
42 | We also store a backup of the previous state in undo.bookmarks that |
|
42 | We also store a backup of the previous state in undo.bookmarks that | |
43 | can be copied back on rollback. |
|
43 | can be copied back on rollback. | |
44 | ''' |
|
44 | ''' | |
45 | refs = repo._bookmarks |
|
45 | refs = repo._bookmarks | |
46 | if os.path.exists(repo.join('bookmarks')): |
|
46 | if os.path.exists(repo.join('bookmarks')): | |
47 | util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks')) |
|
47 | util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks')) | |
48 | if repo._bookmarkcurrent not in refs: |
|
48 | if repo._bookmarkcurrent not in refs: | |
49 | setcurrent(repo, None) |
|
49 | setcurrent(repo, None) | |
50 | wlock = repo.wlock() |
|
50 | wlock = repo.wlock() | |
51 | try: |
|
51 | try: | |
52 | file = repo.opener('bookmarks', 'w', atomictemp=True) |
|
52 | file = repo.opener('bookmarks', 'w', atomictemp=True) | |
53 | for refspec, node in refs.iteritems(): |
|
53 | for refspec, node in refs.iteritems(): | |
54 | file.write("%s %s\n" % (hex(node), refspec)) |
|
54 | file.write("%s %s\n" % (hex(node), refspec)) | |
55 | file.rename() |
|
55 | file.rename() | |
56 |
|
56 | |||
57 | # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) |
|
57 | # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) | |
58 | try: |
|
58 | try: | |
59 | os.utime(repo.sjoin('00changelog.i'), None) |
|
59 | os.utime(repo.sjoin('00changelog.i'), None) | |
60 | except OSError: |
|
60 | except OSError: | |
61 | pass |
|
61 | pass | |
62 |
|
62 | |||
63 | finally: |
|
63 | finally: | |
64 | wlock.release() |
|
64 | wlock.release() | |
65 |
|
65 | |||
66 | def setcurrent(repo, mark): |
|
66 | def setcurrent(repo, mark): | |
67 | '''Set the name of the bookmark that we are currently on |
|
67 | '''Set the name of the bookmark that we are currently on | |
68 |
|
68 | |||
69 | Set the name of the bookmark that we are on (hg update <bookmark>). |
|
69 | Set the name of the bookmark that we are on (hg update <bookmark>). | |
70 | The name is recorded in .hg/bookmarks.current |
|
70 | The name is recorded in .hg/bookmarks.current | |
71 | ''' |
|
71 | ''' | |
72 | current = repo._bookmarkcurrent |
|
72 | current = repo._bookmarkcurrent | |
73 | if current == mark: |
|
73 | if current == mark: | |
74 | return |
|
74 | return | |
75 |
|
75 | |||
76 | refs = repo._bookmarks |
|
76 | refs = repo._bookmarks | |
77 |
|
77 | |||
78 | # do not update if we do update to a rev equal to the current bookmark |
|
78 | # do not update if we do update to a rev equal to the current bookmark | |
79 | if (mark and mark not in refs and |
|
79 | if (mark and mark not in refs and | |
80 | current and refs[current] == repo.changectx('.').node()): |
|
80 | current and refs[current] == repo.changectx('.').node()): | |
81 | return |
|
81 | return | |
82 | if mark not in refs: |
|
82 | if mark not in refs: | |
83 | mark = '' |
|
83 | mark = '' | |
84 | wlock = repo.wlock() |
|
84 | wlock = repo.wlock() | |
85 | try: |
|
85 | try: | |
86 | file = repo.opener('bookmarks.current', 'w', atomictemp=True) |
|
86 | file = repo.opener('bookmarks.current', 'w', atomictemp=True) | |
87 | file.write(mark) |
|
87 | file.write(mark) | |
88 | file.rename() |
|
88 | file.rename() | |
89 | finally: |
|
89 | finally: | |
90 | wlock.release() |
|
90 | wlock.release() | |
91 | repo._bookmarkcurrent = mark |
|
91 | repo._bookmarkcurrent = mark | |
92 |
|
92 | |||
93 | def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None): |
|
93 | def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None): | |
94 | '''track a line of development with movable markers |
|
94 | '''track a line of development with movable markers | |
95 |
|
95 | |||
96 | Bookmarks are pointers to certain commits that move when |
|
96 | Bookmarks are pointers to certain commits that move when | |
97 | committing. Bookmarks are local. They can be renamed, copied and |
|
97 | committing. Bookmarks are local. They can be renamed, copied and | |
98 | deleted. It is possible to use bookmark names in :hg:`merge` and |
|
98 | deleted. It is possible to use bookmark names in :hg:`merge` and | |
99 | :hg:`update` to merge and update respectively to a given bookmark. |
|
99 | :hg:`update` to merge and update respectively to a given bookmark. | |
100 |
|
100 | |||
101 | You can use :hg:`bookmark NAME` to set a bookmark on the working |
|
101 | You can use :hg:`bookmark NAME` to set a bookmark on the working | |
102 | directory's parent revision with the given name. If you specify |
|
102 | directory's parent revision with the given name. If you specify | |
103 | a revision using -r REV (where REV may be an existing bookmark), |
|
103 | a revision using -r REV (where REV may be an existing bookmark), | |
104 | the bookmark is assigned to that revision. |
|
104 | the bookmark is assigned to that revision. | |
105 | ''' |
|
105 | ''' | |
106 | hexfn = ui.debugflag and hex or short |
|
106 | hexfn = ui.debugflag and hex or short | |
107 | marks = repo._bookmarks |
|
107 | marks = repo._bookmarks | |
108 | cur = repo.changectx('.').node() |
|
108 | cur = repo.changectx('.').node() | |
109 |
|
109 | |||
110 | if rename: |
|
110 | if rename: | |
111 | if rename not in marks: |
|
111 | if rename not in marks: | |
112 | raise util.Abort(_("a bookmark of this name does not exist")) |
|
112 | raise util.Abort(_("a bookmark of this name does not exist")) | |
113 | if mark in marks and not force: |
|
113 | if mark in marks and not force: | |
114 | raise util.Abort(_("a bookmark of the same name already exists")) |
|
114 | raise util.Abort(_("a bookmark of the same name already exists")) | |
115 | if mark is None: |
|
115 | if mark is None: | |
116 | raise util.Abort(_("new bookmark name required")) |
|
116 | raise util.Abort(_("new bookmark name required")) | |
117 | marks[mark] = marks[rename] |
|
117 | marks[mark] = marks[rename] | |
118 | del marks[rename] |
|
118 | del marks[rename] | |
119 | if repo._bookmarkcurrent == rename: |
|
119 | if repo._bookmarkcurrent == rename: | |
120 | setcurrent(repo, mark) |
|
120 | setcurrent(repo, mark) | |
121 | write(repo) |
|
121 | write(repo) | |
122 | return |
|
122 | return | |
123 |
|
123 | |||
124 | if delete: |
|
124 | if delete: | |
125 | if mark is None: |
|
125 | if mark is None: | |
126 | raise util.Abort(_("bookmark name required")) |
|
126 | raise util.Abort(_("bookmark name required")) | |
127 | if mark not in marks: |
|
127 | if mark not in marks: | |
128 | raise util.Abort(_("a bookmark of this name does not exist")) |
|
128 | raise util.Abort(_("a bookmark of this name does not exist")) | |
129 | if mark == repo._bookmarkcurrent: |
|
129 | if mark == repo._bookmarkcurrent: | |
130 | setcurrent(repo, None) |
|
130 | setcurrent(repo, None) | |
131 | del marks[mark] |
|
131 | del marks[mark] | |
132 | write(repo) |
|
132 | write(repo) | |
133 | return |
|
133 | return | |
134 |
|
134 | |||
135 | if mark != None: |
|
135 | if mark != None: | |
136 | if "\n" in mark: |
|
136 | if "\n" in mark: | |
137 | raise util.Abort(_("bookmark name cannot contain newlines")) |
|
137 | raise util.Abort(_("bookmark name cannot contain newlines")) | |
138 | mark = mark.strip() |
|
138 | mark = mark.strip() | |
139 | if not mark: |
|
139 | if not mark: | |
140 | raise util.Abort(_("bookmark names cannot consist entirely of " |
|
140 | raise util.Abort(_("bookmark names cannot consist entirely of " | |
141 | "whitespace")) |
|
141 | "whitespace")) | |
142 | if mark in marks and not force: |
|
142 | if mark in marks and not force: | |
143 | raise util.Abort(_("a bookmark of the same name already exists")) |
|
143 | raise util.Abort(_("a bookmark of the same name already exists")) | |
144 | if ((mark in repo.branchtags() or mark == repo.dirstate.branch()) |
|
144 | if ((mark in repo.branchtags() or mark == repo.dirstate.branch()) | |
145 | and not force): |
|
145 | and not force): | |
146 | raise util.Abort( |
|
146 | raise util.Abort( | |
147 | _("a bookmark cannot have the name of an existing branch")) |
|
147 | _("a bookmark cannot have the name of an existing branch")) | |
148 | if rev: |
|
148 | if rev: | |
149 | marks[mark] = repo.lookup(rev) |
|
149 | marks[mark] = repo.lookup(rev) | |
150 | else: |
|
150 | else: | |
151 | marks[mark] = repo.changectx('.').node() |
|
151 | marks[mark] = repo.changectx('.').node() | |
152 | setcurrent(repo, mark) |
|
152 | setcurrent(repo, mark) | |
153 | write(repo) |
|
153 | write(repo) | |
154 | return |
|
154 | return | |
155 |
|
155 | |||
156 | if mark is None: |
|
156 | if mark is None: | |
157 | if rev: |
|
157 | if rev: | |
158 | raise util.Abort(_("bookmark name required")) |
|
158 | raise util.Abort(_("bookmark name required")) | |
159 | if len(marks) == 0: |
|
159 | if len(marks) == 0: | |
160 | ui.status(_("no bookmarks set\n")) |
|
160 | ui.status(_("no bookmarks set\n")) | |
161 | else: |
|
161 | else: | |
162 | for bmark, n in marks.iteritems(): |
|
162 | for bmark, n in marks.iteritems(): | |
163 | if ui.configbool('bookmarks', 'track.current'): |
|
163 | if ui.configbool('bookmarks', 'track.current'): | |
164 | current = repo._bookmarkcurrent |
|
164 | current = repo._bookmarkcurrent | |
165 | if bmark == current and n == cur: |
|
165 | if bmark == current and n == cur: | |
166 | prefix, label = '*', 'bookmarks.current' |
|
166 | prefix, label = '*', 'bookmarks.current' | |
167 | else: |
|
167 | else: | |
168 | prefix, label = ' ', '' |
|
168 | prefix, label = ' ', '' | |
169 | else: |
|
169 | else: | |
170 | if n == cur: |
|
170 | if n == cur: | |
171 | prefix, label = '*', 'bookmarks.current' |
|
171 | prefix, label = '*', 'bookmarks.current' | |
172 | else: |
|
172 | else: | |
173 | prefix, label = ' ', '' |
|
173 | prefix, label = ' ', '' | |
174 |
|
174 | |||
175 | if ui.quiet: |
|
175 | if ui.quiet: | |
176 | ui.write("%s\n" % bmark, label=label) |
|
176 | ui.write("%s\n" % bmark, label=label) | |
177 | else: |
|
177 | else: | |
178 | ui.write(" %s %-25s %d:%s\n" % ( |
|
178 | ui.write(" %s %-25s %d:%s\n" % ( | |
179 | prefix, bmark, repo.changelog.rev(n), hexfn(n)), |
|
179 | prefix, bmark, repo.changelog.rev(n), hexfn(n)), | |
180 | label=label) |
|
180 | label=label) | |
181 | return |
|
181 | return | |
182 |
|
182 | |||
183 | def _revstostrip(changelog, node): |
|
183 | def _revstostrip(changelog, node): | |
184 | srev = changelog.rev(node) |
|
184 | srev = changelog.rev(node) | |
185 | tostrip = [srev] |
|
185 | tostrip = [srev] | |
186 | saveheads = [] |
|
186 | saveheads = [] | |
187 | for r in xrange(srev, len(changelog)): |
|
187 | for r in xrange(srev, len(changelog)): | |
188 | parents = changelog.parentrevs(r) |
|
188 | parents = changelog.parentrevs(r) | |
189 | if parents[0] in tostrip or parents[1] in tostrip: |
|
189 | if parents[0] in tostrip or parents[1] in tostrip: | |
190 | tostrip.append(r) |
|
190 | tostrip.append(r) | |
191 | if parents[1] != nullrev: |
|
191 | if parents[1] != nullrev: | |
192 | for p in parents: |
|
192 | for p in parents: | |
193 | if p not in tostrip and p > srev: |
|
193 | if p not in tostrip and p > srev: | |
194 | saveheads.append(p) |
|
194 | saveheads.append(p) | |
195 | return [r for r in tostrip if r not in saveheads] |
|
195 | return [r for r in tostrip if r not in saveheads] | |
196 |
|
196 | |||
197 | def strip(oldstrip, ui, repo, node, backup="all"): |
|
197 | def strip(oldstrip, ui, repo, node, backup="all"): | |
198 | """Strip bookmarks if revisions are stripped using |
|
198 | """Strip bookmarks if revisions are stripped using | |
199 | the mercurial.strip method. This usually happens during |
|
199 | the mercurial.strip method. This usually happens during | |
200 | qpush and qpop""" |
|
200 | qpush and qpop""" | |
201 | revisions = _revstostrip(repo.changelog, node) |
|
201 | revisions = _revstostrip(repo.changelog, node) | |
202 | marks = repo._bookmarks |
|
202 | marks = repo._bookmarks | |
203 | update = [] |
|
203 | update = [] | |
204 | for mark, n in marks.iteritems(): |
|
204 | for mark, n in marks.iteritems(): | |
205 | if repo.changelog.rev(n) in revisions: |
|
205 | if repo.changelog.rev(n) in revisions: | |
206 | update.append(mark) |
|
206 | update.append(mark) | |
207 | oldstrip(ui, repo, node, backup) |
|
207 | oldstrip(ui, repo, node, backup) | |
208 | if len(update) > 0: |
|
208 | if len(update) > 0: | |
209 | for m in update: |
|
209 | for m in update: | |
210 | marks[m] = repo.changectx('.').node() |
|
210 | marks[m] = repo.changectx('.').node() | |
211 | write(repo) |
|
211 | write(repo) | |
212 |
|
212 | |||
213 | def reposetup(ui, repo): |
|
213 | def reposetup(ui, repo): | |
214 | if not repo.local(): |
|
214 | if not repo.local(): | |
215 | return |
|
215 | return | |
216 |
|
216 | |||
217 | class bookmark_repo(repo.__class__): |
|
217 | class bookmark_repo(repo.__class__): | |
218 |
|
218 | |||
219 | @util.propertycache |
|
219 | @util.propertycache | |
220 | def _bookmarks(self): |
|
220 | def _bookmarks(self): | |
221 | '''Parse .hg/bookmarks file and return a dictionary |
|
221 | '''Parse .hg/bookmarks file and return a dictionary | |
222 |
|
222 | |||
223 | Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values |
|
223 | Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values | |
224 | in the .hg/bookmarks file. |
|
224 | in the .hg/bookmarks file. | |
225 | Read the file and return a (name=>nodeid) dictionary |
|
225 | Read the file and return a (name=>nodeid) dictionary | |
226 | ''' |
|
226 | ''' | |
|
227 | self._loadingbookmarks = True | |||
227 | try: |
|
228 | try: | |
228 | bookmarks = {} |
|
229 | bookmarks = {} | |
229 | for line in self.opener('bookmarks'): |
|
230 | for line in self.opener('bookmarks'): | |
230 | sha, refspec = line.strip().split(' ', 1) |
|
231 | sha, refspec = line.strip().split(' ', 1) | |
231 | bookmarks[refspec] = super(bookmark_repo, self).lookup(sha) |
|
232 | bookmarks[refspec] = super(bookmark_repo, self).lookup(sha) | |
232 | except: |
|
233 | except: | |
233 | pass |
|
234 | pass | |
|
235 | self._loadingbookmarks = False | |||
234 | return bookmarks |
|
236 | return bookmarks | |
235 |
|
237 | |||
236 | @util.propertycache |
|
238 | @util.propertycache | |
237 | def _bookmarkcurrent(self): |
|
239 | def _bookmarkcurrent(self): | |
238 | '''Get the current bookmark |
|
240 | '''Get the current bookmark | |
239 |
|
241 | |||
240 | If we use gittishsh branches we have a current bookmark that |
|
242 | If we use gittishsh branches we have a current bookmark that | |
241 | we are on. This function returns the name of the bookmark. It |
|
243 | we are on. This function returns the name of the bookmark. It | |
242 | is stored in .hg/bookmarks.current |
|
244 | is stored in .hg/bookmarks.current | |
243 | ''' |
|
245 | ''' | |
244 | mark = None |
|
246 | mark = None | |
245 | if os.path.exists(self.join('bookmarks.current')): |
|
247 | if os.path.exists(self.join('bookmarks.current')): | |
246 | file = self.opener('bookmarks.current') |
|
248 | file = self.opener('bookmarks.current') | |
247 | # No readline() in posixfile_nt, reading everything is cheap |
|
249 | # No readline() in posixfile_nt, reading everything is cheap | |
248 | mark = (file.readlines() or [''])[0] |
|
250 | mark = (file.readlines() or [''])[0] | |
249 | if mark == '': |
|
251 | if mark == '': | |
250 | mark = None |
|
252 | mark = None | |
251 | file.close() |
|
253 | file.close() | |
252 | return mark |
|
254 | return mark | |
253 |
|
255 | |||
254 | def rollback(self, *args): |
|
256 | def rollback(self, *args): | |
255 | if os.path.exists(self.join('undo.bookmarks')): |
|
257 | if os.path.exists(self.join('undo.bookmarks')): | |
256 | util.rename(self.join('undo.bookmarks'), self.join('bookmarks')) |
|
258 | util.rename(self.join('undo.bookmarks'), self.join('bookmarks')) | |
257 | return super(bookmark_repo, self).rollback(*args) |
|
259 | return super(bookmark_repo, self).rollback(*args) | |
258 |
|
260 | |||
259 | def lookup(self, key): |
|
261 | def lookup(self, key): | |
|
262 | if not getattr(self, '_loadingbookmarks', False): | |||
260 | if key in self._bookmarks: |
|
263 | if key in self._bookmarks: | |
261 | key = self._bookmarks[key] |
|
264 | key = self._bookmarks[key] | |
262 | return super(bookmark_repo, self).lookup(key) |
|
265 | return super(bookmark_repo, self).lookup(key) | |
263 |
|
266 | |||
264 | def _bookmarksupdate(self, parents, node): |
|
267 | def _bookmarksupdate(self, parents, node): | |
265 | marks = self._bookmarks |
|
268 | marks = self._bookmarks | |
266 | update = False |
|
269 | update = False | |
267 | if ui.configbool('bookmarks', 'track.current'): |
|
270 | if ui.configbool('bookmarks', 'track.current'): | |
268 | mark = self._bookmarkcurrent |
|
271 | mark = self._bookmarkcurrent | |
269 | if mark and marks[mark] in parents: |
|
272 | if mark and marks[mark] in parents: | |
270 | marks[mark] = node |
|
273 | marks[mark] = node | |
271 | update = True |
|
274 | update = True | |
272 | else: |
|
275 | else: | |
273 | for mark, n in marks.items(): |
|
276 | for mark, n in marks.items(): | |
274 | if n in parents: |
|
277 | if n in parents: | |
275 | marks[mark] = node |
|
278 | marks[mark] = node | |
276 | update = True |
|
279 | update = True | |
277 | if update: |
|
280 | if update: | |
278 | write(self) |
|
281 | write(self) | |
279 |
|
282 | |||
280 | def commitctx(self, ctx, error=False): |
|
283 | def commitctx(self, ctx, error=False): | |
281 | """Add a revision to the repository and |
|
284 | """Add a revision to the repository and | |
282 | move the bookmark""" |
|
285 | move the bookmark""" | |
283 | wlock = self.wlock() # do both commit and bookmark with lock held |
|
286 | wlock = self.wlock() # do both commit and bookmark with lock held | |
284 | try: |
|
287 | try: | |
285 | node = super(bookmark_repo, self).commitctx(ctx, error) |
|
288 | node = super(bookmark_repo, self).commitctx(ctx, error) | |
286 | if node is None: |
|
289 | if node is None: | |
287 | return None |
|
290 | return None | |
288 | parents = self.changelog.parents(node) |
|
291 | parents = self.changelog.parents(node) | |
289 | if parents[1] == nullid: |
|
292 | if parents[1] == nullid: | |
290 | parents = (parents[0],) |
|
293 | parents = (parents[0],) | |
291 |
|
294 | |||
292 | self._bookmarksupdate(parents, node) |
|
295 | self._bookmarksupdate(parents, node) | |
293 | return node |
|
296 | return node | |
294 | finally: |
|
297 | finally: | |
295 | wlock.release() |
|
298 | wlock.release() | |
296 |
|
299 | |||
297 | def pull(self, remote, heads=None, force=False): |
|
300 | def pull(self, remote, heads=None, force=False): | |
298 | result = super(bookmark_repo, self).pull(remote, heads, force) |
|
301 | result = super(bookmark_repo, self).pull(remote, heads, force) | |
299 |
|
302 | |||
300 | self.ui.debug("checking for updated bookmarks\n") |
|
303 | self.ui.debug("checking for updated bookmarks\n") | |
301 | rb = remote.listkeys('bookmarks') |
|
304 | rb = remote.listkeys('bookmarks') | |
302 | changed = False |
|
305 | changed = False | |
303 | for k in rb.keys(): |
|
306 | for k in rb.keys(): | |
304 | if k in self._bookmarks: |
|
307 | if k in self._bookmarks: | |
305 | nr, nl = rb[k], self._bookmarks[k] |
|
308 | nr, nl = rb[k], self._bookmarks[k] | |
306 | if nr in self: |
|
309 | if nr in self: | |
307 | cr = self[nr] |
|
310 | cr = self[nr] | |
308 | cl = self[nl] |
|
311 | cl = self[nl] | |
309 | if cl.rev() >= cr.rev(): |
|
312 | if cl.rev() >= cr.rev(): | |
310 | continue |
|
313 | continue | |
311 | if cr in cl.descendants(): |
|
314 | if cr in cl.descendants(): | |
312 | self._bookmarks[k] = cr.node() |
|
315 | self._bookmarks[k] = cr.node() | |
313 | changed = True |
|
316 | changed = True | |
314 | self.ui.status(_("updating bookmark %s\n") % k) |
|
317 | self.ui.status(_("updating bookmark %s\n") % k) | |
315 | else: |
|
318 | else: | |
316 | self.ui.warn(_("not updating divergent" |
|
319 | self.ui.warn(_("not updating divergent" | |
317 | " bookmark %s\n") % k) |
|
320 | " bookmark %s\n") % k) | |
318 | if changed: |
|
321 | if changed: | |
319 | write(repo) |
|
322 | write(repo) | |
320 |
|
323 | |||
321 | return result |
|
324 | return result | |
322 |
|
325 | |||
323 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
326 | def push(self, remote, force=False, revs=None, newbranch=False): | |
324 | result = super(bookmark_repo, self).push(remote, force, revs, |
|
327 | result = super(bookmark_repo, self).push(remote, force, revs, | |
325 | newbranch) |
|
328 | newbranch) | |
326 |
|
329 | |||
327 | self.ui.debug("checking for updated bookmarks\n") |
|
330 | self.ui.debug("checking for updated bookmarks\n") | |
328 | rb = remote.listkeys('bookmarks') |
|
331 | rb = remote.listkeys('bookmarks') | |
329 | for k in rb.keys(): |
|
332 | for k in rb.keys(): | |
330 | if k in self._bookmarks: |
|
333 | if k in self._bookmarks: | |
331 | nr, nl = rb[k], self._bookmarks[k] |
|
334 | nr, nl = rb[k], self._bookmarks[k] | |
332 | if nr in self: |
|
335 | if nr in self: | |
333 | cr = self[nr] |
|
336 | cr = self[nr] | |
334 | cl = self[nl] |
|
337 | cl = self[nl] | |
335 | if cl in cr.descendants(): |
|
338 | if cl in cr.descendants(): | |
336 | r = remote.pushkey('bookmarks', k, nr, nl) |
|
339 | r = remote.pushkey('bookmarks', k, nr, nl) | |
337 | if r: |
|
340 | if r: | |
338 | self.ui.status(_("updating bookmark %s\n") % k) |
|
341 | self.ui.status(_("updating bookmark %s\n") % k) | |
339 | else: |
|
342 | else: | |
340 | self.ui.warn(_('updating bookmark %s' |
|
343 | self.ui.warn(_('updating bookmark %s' | |
341 | ' failed!\n') % k) |
|
344 | ' failed!\n') % k) | |
342 |
|
345 | |||
343 | return result |
|
346 | return result | |
344 |
|
347 | |||
345 | def addchangegroup(self, *args, **kwargs): |
|
348 | def addchangegroup(self, *args, **kwargs): | |
346 | parents = self.dirstate.parents() |
|
349 | parents = self.dirstate.parents() | |
347 |
|
350 | |||
348 | result = super(bookmark_repo, self).addchangegroup(*args, **kwargs) |
|
351 | result = super(bookmark_repo, self).addchangegroup(*args, **kwargs) | |
349 | if result > 1: |
|
352 | if result > 1: | |
350 | # We have more heads than before |
|
353 | # We have more heads than before | |
351 | return result |
|
354 | return result | |
352 | node = self.changelog.tip() |
|
355 | node = self.changelog.tip() | |
353 |
|
356 | |||
354 | self._bookmarksupdate(parents, node) |
|
357 | self._bookmarksupdate(parents, node) | |
355 | return result |
|
358 | return result | |
356 |
|
359 | |||
357 | def _findtags(self): |
|
360 | def _findtags(self): | |
358 | """Merge bookmarks with normal tags""" |
|
361 | """Merge bookmarks with normal tags""" | |
359 | (tags, tagtypes) = super(bookmark_repo, self)._findtags() |
|
362 | (tags, tagtypes) = super(bookmark_repo, self)._findtags() | |
|
363 | if not getattr(self, '_loadingbookmarks', False): | |||
360 | tags.update(self._bookmarks) |
|
364 | tags.update(self._bookmarks) | |
361 | return (tags, tagtypes) |
|
365 | return (tags, tagtypes) | |
362 |
|
366 | |||
363 | if hasattr(repo, 'invalidate'): |
|
367 | if hasattr(repo, 'invalidate'): | |
364 | def invalidate(self): |
|
368 | def invalidate(self): | |
365 | super(bookmark_repo, self).invalidate() |
|
369 | super(bookmark_repo, self).invalidate() | |
366 | for attr in ('_bookmarks', '_bookmarkcurrent'): |
|
370 | for attr in ('_bookmarks', '_bookmarkcurrent'): | |
367 | if attr in self.__dict__: |
|
371 | if attr in self.__dict__: | |
368 | delattr(self, attr) |
|
372 | delattr(self, attr) | |
369 |
|
373 | |||
370 | repo.__class__ = bookmark_repo |
|
374 | repo.__class__ = bookmark_repo | |
371 |
|
375 | |||
372 | def listbookmarks(repo): |
|
376 | def listbookmarks(repo): | |
373 | # We may try to list bookmarks on a repo type that does not |
|
377 | # We may try to list bookmarks on a repo type that does not | |
374 | # support it (e.g., statichttprepository). |
|
378 | # support it (e.g., statichttprepository). | |
375 | if not hasattr(repo, '_bookmarks'): |
|
379 | if not hasattr(repo, '_bookmarks'): | |
376 | return {} |
|
380 | return {} | |
377 |
|
381 | |||
378 | d = {} |
|
382 | d = {} | |
379 | for k, v in repo._bookmarks.iteritems(): |
|
383 | for k, v in repo._bookmarks.iteritems(): | |
380 | d[k] = hex(v) |
|
384 | d[k] = hex(v) | |
381 | return d |
|
385 | return d | |
382 |
|
386 | |||
383 | def pushbookmark(repo, key, old, new): |
|
387 | def pushbookmark(repo, key, old, new): | |
384 | w = repo.wlock() |
|
388 | w = repo.wlock() | |
385 | try: |
|
389 | try: | |
386 | marks = repo._bookmarks |
|
390 | marks = repo._bookmarks | |
387 | if hex(marks.get(key, '')) != old: |
|
391 | if hex(marks.get(key, '')) != old: | |
388 | return False |
|
392 | return False | |
389 | if new == '': |
|
393 | if new == '': | |
390 | del marks[key] |
|
394 | del marks[key] | |
391 | else: |
|
395 | else: | |
392 | if new not in repo: |
|
396 | if new not in repo: | |
393 | return False |
|
397 | return False | |
394 | marks[key] = repo[new].node() |
|
398 | marks[key] = repo[new].node() | |
395 | write(repo) |
|
399 | write(repo) | |
396 | return True |
|
400 | return True | |
397 | finally: |
|
401 | finally: | |
398 | w.release() |
|
402 | w.release() | |
399 |
|
403 | |||
400 | def pull(oldpull, ui, repo, source="default", **opts): |
|
404 | def pull(oldpull, ui, repo, source="default", **opts): | |
401 | # translate bookmark args to rev args for actual pull |
|
405 | # translate bookmark args to rev args for actual pull | |
402 | if opts.get('bookmark'): |
|
406 | if opts.get('bookmark'): | |
403 | # this is an unpleasant hack as pull will do this internally |
|
407 | # this is an unpleasant hack as pull will do this internally | |
404 | source, branches = hg.parseurl(ui.expandpath(source), |
|
408 | source, branches = hg.parseurl(ui.expandpath(source), | |
405 | opts.get('branch')) |
|
409 | opts.get('branch')) | |
406 | other = hg.repository(hg.remoteui(repo, opts), source) |
|
410 | other = hg.repository(hg.remoteui(repo, opts), source) | |
407 | rb = other.listkeys('bookmarks') |
|
411 | rb = other.listkeys('bookmarks') | |
408 |
|
412 | |||
409 | for b in opts['bookmark']: |
|
413 | for b in opts['bookmark']: | |
410 | if b not in rb: |
|
414 | if b not in rb: | |
411 | raise util.Abort(_('remote bookmark %s not found!') % b) |
|
415 | raise util.Abort(_('remote bookmark %s not found!') % b) | |
412 | opts.setdefault('rev', []).append(b) |
|
416 | opts.setdefault('rev', []).append(b) | |
413 |
|
417 | |||
414 | result = oldpull(ui, repo, source, **opts) |
|
418 | result = oldpull(ui, repo, source, **opts) | |
415 |
|
419 | |||
416 | # update specified bookmarks |
|
420 | # update specified bookmarks | |
417 | if opts.get('bookmark'): |
|
421 | if opts.get('bookmark'): | |
418 | for b in opts['bookmark']: |
|
422 | for b in opts['bookmark']: | |
419 | # explicit pull overrides local bookmark if any |
|
423 | # explicit pull overrides local bookmark if any | |
420 | ui.status(_("importing bookmark %s\n") % b) |
|
424 | ui.status(_("importing bookmark %s\n") % b) | |
421 | repo._bookmarks[b] = repo[rb[b]].node() |
|
425 | repo._bookmarks[b] = repo[rb[b]].node() | |
422 | write(repo) |
|
426 | write(repo) | |
423 |
|
427 | |||
424 | return result |
|
428 | return result | |
425 |
|
429 | |||
426 | def push(oldpush, ui, repo, dest=None, **opts): |
|
430 | def push(oldpush, ui, repo, dest=None, **opts): | |
427 | dopush = True |
|
431 | dopush = True | |
428 | if opts.get('bookmark'): |
|
432 | if opts.get('bookmark'): | |
429 | dopush = False |
|
433 | dopush = False | |
430 | for b in opts['bookmark']: |
|
434 | for b in opts['bookmark']: | |
431 | if b in repo._bookmarks: |
|
435 | if b in repo._bookmarks: | |
432 | dopush = True |
|
436 | dopush = True | |
433 | opts.setdefault('rev', []).append(b) |
|
437 | opts.setdefault('rev', []).append(b) | |
434 |
|
438 | |||
435 | result = 0 |
|
439 | result = 0 | |
436 | if dopush: |
|
440 | if dopush: | |
437 | result = oldpush(ui, repo, dest, **opts) |
|
441 | result = oldpush(ui, repo, dest, **opts) | |
438 |
|
442 | |||
439 | if opts.get('bookmark'): |
|
443 | if opts.get('bookmark'): | |
440 | # this is an unpleasant hack as push will do this internally |
|
444 | # this is an unpleasant hack as push will do this internally | |
441 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
445 | dest = ui.expandpath(dest or 'default-push', dest or 'default') | |
442 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
446 | dest, branches = hg.parseurl(dest, opts.get('branch')) | |
443 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
447 | other = hg.repository(hg.remoteui(repo, opts), dest) | |
444 | rb = other.listkeys('bookmarks') |
|
448 | rb = other.listkeys('bookmarks') | |
445 | for b in opts['bookmark']: |
|
449 | for b in opts['bookmark']: | |
446 | # explicit push overrides remote bookmark if any |
|
450 | # explicit push overrides remote bookmark if any | |
447 | if b in repo._bookmarks: |
|
451 | if b in repo._bookmarks: | |
448 | ui.status(_("exporting bookmark %s\n") % b) |
|
452 | ui.status(_("exporting bookmark %s\n") % b) | |
449 | new = repo[b].hex() |
|
453 | new = repo[b].hex() | |
450 | elif b in rb: |
|
454 | elif b in rb: | |
451 | ui.status(_("deleting remote bookmark %s\n") % b) |
|
455 | ui.status(_("deleting remote bookmark %s\n") % b) | |
452 | new = '' # delete |
|
456 | new = '' # delete | |
453 | else: |
|
457 | else: | |
454 | ui.warn(_('bookmark %s does not exist on the local ' |
|
458 | ui.warn(_('bookmark %s does not exist on the local ' | |
455 | 'or remote repository!\n') % b) |
|
459 | 'or remote repository!\n') % b) | |
456 | return 2 |
|
460 | return 2 | |
457 | old = rb.get(b, '') |
|
461 | old = rb.get(b, '') | |
458 | r = other.pushkey('bookmarks', b, old, new) |
|
462 | r = other.pushkey('bookmarks', b, old, new) | |
459 | if not r: |
|
463 | if not r: | |
460 | ui.warn(_('updating bookmark %s failed!\n') % b) |
|
464 | ui.warn(_('updating bookmark %s failed!\n') % b) | |
461 | if not result: |
|
465 | if not result: | |
462 | result = 2 |
|
466 | result = 2 | |
463 |
|
467 | |||
464 | return result |
|
468 | return result | |
465 |
|
469 | |||
466 | def diffbookmarks(ui, repo, remote): |
|
470 | def diffbookmarks(ui, repo, remote): | |
467 | ui.status(_("searching for changes\n")) |
|
471 | ui.status(_("searching for changes\n")) | |
468 |
|
472 | |||
469 | lmarks = repo.listkeys('bookmarks') |
|
473 | lmarks = repo.listkeys('bookmarks') | |
470 | rmarks = remote.listkeys('bookmarks') |
|
474 | rmarks = remote.listkeys('bookmarks') | |
471 |
|
475 | |||
472 | diff = sorted(set(rmarks) - set(lmarks)) |
|
476 | diff = sorted(set(rmarks) - set(lmarks)) | |
473 | for k in diff: |
|
477 | for k in diff: | |
474 | ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) |
|
478 | ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) | |
475 |
|
479 | |||
476 | if len(diff) <= 0: |
|
480 | if len(diff) <= 0: | |
477 | ui.status(_("no changes found\n")) |
|
481 | ui.status(_("no changes found\n")) | |
478 | return 1 |
|
482 | return 1 | |
479 | return 0 |
|
483 | return 0 | |
480 |
|
484 | |||
481 | def incoming(oldincoming, ui, repo, source="default", **opts): |
|
485 | def incoming(oldincoming, ui, repo, source="default", **opts): | |
482 | if opts.get('bookmarks'): |
|
486 | if opts.get('bookmarks'): | |
483 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
487 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) | |
484 | other = hg.repository(hg.remoteui(repo, opts), source) |
|
488 | other = hg.repository(hg.remoteui(repo, opts), source) | |
485 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
489 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) | |
486 | return diffbookmarks(ui, repo, other) |
|
490 | return diffbookmarks(ui, repo, other) | |
487 | else: |
|
491 | else: | |
488 | return oldincoming(ui, repo, source, **opts) |
|
492 | return oldincoming(ui, repo, source, **opts) | |
489 |
|
493 | |||
490 | def outgoing(oldoutgoing, ui, repo, dest=None, **opts): |
|
494 | def outgoing(oldoutgoing, ui, repo, dest=None, **opts): | |
491 | if opts.get('bookmarks'): |
|
495 | if opts.get('bookmarks'): | |
492 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
496 | dest = ui.expandpath(dest or 'default-push', dest or 'default') | |
493 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
497 | dest, branches = hg.parseurl(dest, opts.get('branch')) | |
494 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
498 | other = hg.repository(hg.remoteui(repo, opts), dest) | |
495 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
499 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) | |
496 | return diffbookmarks(ui, other, repo) |
|
500 | return diffbookmarks(ui, other, repo) | |
497 | else: |
|
501 | else: | |
498 | return oldoutgoing(ui, repo, dest, **opts) |
|
502 | return oldoutgoing(ui, repo, dest, **opts) | |
499 |
|
503 | |||
500 | def uisetup(ui): |
|
504 | def uisetup(ui): | |
501 | extensions.wrapfunction(repair, "strip", strip) |
|
505 | extensions.wrapfunction(repair, "strip", strip) | |
502 | if ui.configbool('bookmarks', 'track.current'): |
|
506 | if ui.configbool('bookmarks', 'track.current'): | |
503 | extensions.wrapcommand(commands.table, 'update', updatecurbookmark) |
|
507 | extensions.wrapcommand(commands.table, 'update', updatecurbookmark) | |
504 |
|
508 | |||
505 | entry = extensions.wrapcommand(commands.table, 'pull', pull) |
|
509 | entry = extensions.wrapcommand(commands.table, 'pull', pull) | |
506 | entry[1].append(('B', 'bookmark', [], |
|
510 | entry[1].append(('B', 'bookmark', [], | |
507 | _("bookmark to import"), |
|
511 | _("bookmark to import"), | |
508 | _('BOOKMARK'))) |
|
512 | _('BOOKMARK'))) | |
509 | entry = extensions.wrapcommand(commands.table, 'push', push) |
|
513 | entry = extensions.wrapcommand(commands.table, 'push', push) | |
510 | entry[1].append(('B', 'bookmark', [], |
|
514 | entry[1].append(('B', 'bookmark', [], | |
511 | _("bookmark to export"), |
|
515 | _("bookmark to export"), | |
512 | _('BOOKMARK'))) |
|
516 | _('BOOKMARK'))) | |
513 | entry = extensions.wrapcommand(commands.table, 'incoming', incoming) |
|
517 | entry = extensions.wrapcommand(commands.table, 'incoming', incoming) | |
514 | entry[1].append(('B', 'bookmarks', False, |
|
518 | entry[1].append(('B', 'bookmarks', False, | |
515 | _("compare bookmark"))) |
|
519 | _("compare bookmark"))) | |
516 | entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing) |
|
520 | entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing) | |
517 | entry[1].append(('B', 'bookmarks', False, |
|
521 | entry[1].append(('B', 'bookmarks', False, | |
518 | _("compare bookmark"))) |
|
522 | _("compare bookmark"))) | |
519 |
|
523 | |||
520 | pushkey.register('bookmarks', pushbookmark, listbookmarks) |
|
524 | pushkey.register('bookmarks', pushbookmark, listbookmarks) | |
521 |
|
525 | |||
522 | def updatecurbookmark(orig, ui, repo, *args, **opts): |
|
526 | def updatecurbookmark(orig, ui, repo, *args, **opts): | |
523 | '''Set the current bookmark |
|
527 | '''Set the current bookmark | |
524 |
|
528 | |||
525 | If the user updates to a bookmark we update the .hg/bookmarks.current |
|
529 | If the user updates to a bookmark we update the .hg/bookmarks.current | |
526 | file. |
|
530 | file. | |
527 | ''' |
|
531 | ''' | |
528 | res = orig(ui, repo, *args, **opts) |
|
532 | res = orig(ui, repo, *args, **opts) | |
529 | rev = opts['rev'] |
|
533 | rev = opts['rev'] | |
530 | if not rev and len(args) > 0: |
|
534 | if not rev and len(args) > 0: | |
531 | rev = args[0] |
|
535 | rev = args[0] | |
532 | setcurrent(repo, rev) |
|
536 | setcurrent(repo, rev) | |
533 | return res |
|
537 | return res | |
534 |
|
538 | |||
535 | cmdtable = { |
|
539 | cmdtable = { | |
536 | "bookmarks": |
|
540 | "bookmarks": | |
537 | (bookmark, |
|
541 | (bookmark, | |
538 | [('f', 'force', False, _('force')), |
|
542 | [('f', 'force', False, _('force')), | |
539 | ('r', 'rev', '', _('revision'), _('REV')), |
|
543 | ('r', 'rev', '', _('revision'), _('REV')), | |
540 | ('d', 'delete', False, _('delete a given bookmark')), |
|
544 | ('d', 'delete', False, _('delete a given bookmark')), | |
541 | ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))], |
|
545 | ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))], | |
542 | _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')), |
|
546 | _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')), | |
543 | } |
|
547 | } | |
544 |
|
548 | |||
545 | colortable = {'bookmarks.current': 'green'} |
|
549 | colortable = {'bookmarks.current': 'green'} |
@@ -1,173 +1,188 b'' | |||||
1 | # darcs.py - darcs support for the convert extension |
|
1 | # darcs.py - darcs support for the convert extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others |
|
3 | # Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from common import NoRepo, checktool, commandline, commit, converter_source |
|
8 | from common import NoRepo, checktool, commandline, commit, converter_source | |
9 | from mercurial.i18n import _ |
|
9 | from mercurial.i18n import _ | |
10 | from mercurial import util |
|
10 | from mercurial import util | |
11 | import os, shutil, tempfile |
|
11 | import os, shutil, tempfile, re | |
12 |
|
12 | |||
13 | # The naming drift of ElementTree is fun! |
|
13 | # The naming drift of ElementTree is fun! | |
14 |
|
14 | |||
15 | try: |
|
15 | try: | |
16 | from xml.etree.cElementTree import ElementTree |
|
16 | from xml.etree.cElementTree import ElementTree | |
17 | except ImportError: |
|
17 | except ImportError: | |
18 | try: |
|
18 | try: | |
19 | from xml.etree.ElementTree import ElementTree |
|
19 | from xml.etree.ElementTree import ElementTree | |
20 | except ImportError: |
|
20 | except ImportError: | |
21 | try: |
|
21 | try: | |
22 | from elementtree.cElementTree import ElementTree |
|
22 | from elementtree.cElementTree import ElementTree | |
23 | except ImportError: |
|
23 | except ImportError: | |
24 | try: |
|
24 | try: | |
25 | from elementtree.ElementTree import ElementTree |
|
25 | from elementtree.ElementTree import ElementTree | |
26 | except ImportError: |
|
26 | except ImportError: | |
27 | ElementTree = None |
|
27 | ElementTree = None | |
28 |
|
28 | |||
29 | class darcs_source(converter_source, commandline): |
|
29 | class darcs_source(converter_source, commandline): | |
30 | def __init__(self, ui, path, rev=None): |
|
30 | def __init__(self, ui, path, rev=None): | |
31 | converter_source.__init__(self, ui, path, rev=rev) |
|
31 | converter_source.__init__(self, ui, path, rev=rev) | |
32 | commandline.__init__(self, ui, 'darcs') |
|
32 | commandline.__init__(self, ui, 'darcs') | |
33 |
|
33 | |||
34 |
# check for _darcs, ElementTree |
|
34 | # check for _darcs, ElementTree so that we can easily skip | |
35 |
# |
|
35 | # test-convert-darcs if ElementTree is not around | |
36 | if not os.path.exists(os.path.join(path, '_darcs', 'inventories')): |
|
|||
37 | raise NoRepo(_("%s does not look like a darcs repository") % path) |
|
|||
38 |
|
||||
39 | if not os.path.exists(os.path.join(path, '_darcs')): |
|
36 | if not os.path.exists(os.path.join(path, '_darcs')): | |
40 | raise NoRepo(_("%s does not look like a darcs repository") % path) |
|
37 | raise NoRepo(_("%s does not look like a darcs repository") % path) | |
41 |
|
38 | |||
42 | checktool('darcs') |
|
39 | checktool('darcs') | |
43 | version = self.run0('--version').splitlines()[0].strip() |
|
40 | version = self.run0('--version').splitlines()[0].strip() | |
44 | if version < '2.1': |
|
41 | if version < '2.1': | |
45 | raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') % |
|
42 | raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') % | |
46 | version) |
|
43 | version) | |
47 |
|
44 | |||
48 | if ElementTree is None: |
|
45 | if ElementTree is None: | |
49 | raise util.Abort(_("Python ElementTree module is not available")) |
|
46 | raise util.Abort(_("Python ElementTree module is not available")) | |
50 |
|
47 | |||
51 | self.path = os.path.realpath(path) |
|
48 | self.path = os.path.realpath(path) | |
52 |
|
49 | |||
53 | self.lastrev = None |
|
50 | self.lastrev = None | |
54 | self.changes = {} |
|
51 | self.changes = {} | |
55 | self.parents = {} |
|
52 | self.parents = {} | |
56 | self.tags = {} |
|
53 | self.tags = {} | |
57 |
|
54 | |||
|
55 | # Check darcs repository format | |||
|
56 | format = self.format() | |||
|
57 | if format: | |||
|
58 | if format in ('darcs-1.0', 'hashed'): | |||
|
59 | raise NoRepo(_("%s repository format is unsupported, " | |||
|
60 | "please upgrade") % format) | |||
|
61 | else: | |||
|
62 | self.ui.warn(_('failed to detect repository format!')) | |||
|
63 | ||||
58 | def before(self): |
|
64 | def before(self): | |
59 | self.tmppath = tempfile.mkdtemp( |
|
65 | self.tmppath = tempfile.mkdtemp( | |
60 | prefix='convert-' + os.path.basename(self.path) + '-') |
|
66 | prefix='convert-' + os.path.basename(self.path) + '-') | |
61 | output, status = self.run('init', repodir=self.tmppath) |
|
67 | output, status = self.run('init', repodir=self.tmppath) | |
62 | self.checkexit(status) |
|
68 | self.checkexit(status) | |
63 |
|
69 | |||
64 | tree = self.xml('changes', xml_output=True, summary=True, |
|
70 | tree = self.xml('changes', xml_output=True, summary=True, | |
65 | repodir=self.path) |
|
71 | repodir=self.path) | |
66 | tagname = None |
|
72 | tagname = None | |
67 | child = None |
|
73 | child = None | |
68 | for elt in tree.findall('patch'): |
|
74 | for elt in tree.findall('patch'): | |
69 | node = elt.get('hash') |
|
75 | node = elt.get('hash') | |
70 | name = elt.findtext('name', '') |
|
76 | name = elt.findtext('name', '') | |
71 | if name.startswith('TAG '): |
|
77 | if name.startswith('TAG '): | |
72 | tagname = name[4:].strip() |
|
78 | tagname = name[4:].strip() | |
73 | elif tagname is not None: |
|
79 | elif tagname is not None: | |
74 | self.tags[tagname] = node |
|
80 | self.tags[tagname] = node | |
75 | tagname = None |
|
81 | tagname = None | |
76 | self.changes[node] = elt |
|
82 | self.changes[node] = elt | |
77 | self.parents[child] = [node] |
|
83 | self.parents[child] = [node] | |
78 | child = node |
|
84 | child = node | |
79 | self.parents[child] = [] |
|
85 | self.parents[child] = [] | |
80 |
|
86 | |||
81 | def after(self): |
|
87 | def after(self): | |
82 | self.ui.debug('cleaning up %s\n' % self.tmppath) |
|
88 | self.ui.debug('cleaning up %s\n' % self.tmppath) | |
83 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
89 | shutil.rmtree(self.tmppath, ignore_errors=True) | |
84 |
|
90 | |||
85 | def xml(self, cmd, **kwargs): |
|
91 | def xml(self, cmd, **kwargs): | |
86 | # NOTE: darcs is currently encoding agnostic and will print |
|
92 | # NOTE: darcs is currently encoding agnostic and will print | |
87 | # patch metadata byte-for-byte, even in the XML changelog. |
|
93 | # patch metadata byte-for-byte, even in the XML changelog. | |
88 | etree = ElementTree() |
|
94 | etree = ElementTree() | |
89 | fp = self._run(cmd, **kwargs) |
|
95 | fp = self._run(cmd, **kwargs) | |
90 | etree.parse(fp) |
|
96 | etree.parse(fp) | |
91 | self.checkexit(fp.close()) |
|
97 | self.checkexit(fp.close()) | |
92 | return etree.getroot() |
|
98 | return etree.getroot() | |
93 |
|
99 | |||
|
100 | def format(self): | |||
|
101 | output, status = self.run('show', 'repo', no_files=True, | |||
|
102 | repodir=self.path) | |||
|
103 | self.checkexit(status) | |||
|
104 | m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE) | |||
|
105 | if not m: | |||
|
106 | return None | |||
|
107 | return ','.join(sorted(f.strip() for f in m.group(1).split(','))) | |||
|
108 | ||||
94 | def manifest(self): |
|
109 | def manifest(self): | |
95 | man = [] |
|
110 | man = [] | |
96 | output, status = self.run('show', 'files', no_directories=True, |
|
111 | output, status = self.run('show', 'files', no_directories=True, | |
97 | repodir=self.tmppath) |
|
112 | repodir=self.tmppath) | |
98 | self.checkexit(status) |
|
113 | self.checkexit(status) | |
99 | for line in output.split('\n'): |
|
114 | for line in output.split('\n'): | |
100 | path = line[2:] |
|
115 | path = line[2:] | |
101 | if path: |
|
116 | if path: | |
102 | man.append(path) |
|
117 | man.append(path) | |
103 | return man |
|
118 | return man | |
104 |
|
119 | |||
105 | def getheads(self): |
|
120 | def getheads(self): | |
106 | return self.parents[None] |
|
121 | return self.parents[None] | |
107 |
|
122 | |||
108 | def getcommit(self, rev): |
|
123 | def getcommit(self, rev): | |
109 | elt = self.changes[rev] |
|
124 | elt = self.changes[rev] | |
110 | date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y') |
|
125 | date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y') | |
111 | desc = elt.findtext('name') + '\n' + elt.findtext('comment', '') |
|
126 | desc = elt.findtext('name') + '\n' + elt.findtext('comment', '') | |
112 | # etree can return unicode objects for name, comment, and author, |
|
127 | # etree can return unicode objects for name, comment, and author, | |
113 | # so recode() is used to ensure str objects are emitted. |
|
128 | # so recode() is used to ensure str objects are emitted. | |
114 | return commit(author=self.recode(elt.get('author')), |
|
129 | return commit(author=self.recode(elt.get('author')), | |
115 | date=util.datestr(date), |
|
130 | date=util.datestr(date), | |
116 | desc=self.recode(desc).strip(), |
|
131 | desc=self.recode(desc).strip(), | |
117 | parents=self.parents[rev]) |
|
132 | parents=self.parents[rev]) | |
118 |
|
133 | |||
119 | def pull(self, rev): |
|
134 | def pull(self, rev): | |
120 | output, status = self.run('pull', self.path, all=True, |
|
135 | output, status = self.run('pull', self.path, all=True, | |
121 | match='hash %s' % rev, |
|
136 | match='hash %s' % rev, | |
122 | no_test=True, no_posthook=True, |
|
137 | no_test=True, no_posthook=True, | |
123 | external_merge='/bin/false', |
|
138 | external_merge='/bin/false', | |
124 | repodir=self.tmppath) |
|
139 | repodir=self.tmppath) | |
125 | if status: |
|
140 | if status: | |
126 | if output.find('We have conflicts in') == -1: |
|
141 | if output.find('We have conflicts in') == -1: | |
127 | self.checkexit(status, output) |
|
142 | self.checkexit(status, output) | |
128 | output, status = self.run('revert', all=True, repodir=self.tmppath) |
|
143 | output, status = self.run('revert', all=True, repodir=self.tmppath) | |
129 | self.checkexit(status, output) |
|
144 | self.checkexit(status, output) | |
130 |
|
145 | |||
131 | def getchanges(self, rev): |
|
146 | def getchanges(self, rev): | |
132 | copies = {} |
|
147 | copies = {} | |
133 | changes = [] |
|
148 | changes = [] | |
134 | man = None |
|
149 | man = None | |
135 | for elt in self.changes[rev].find('summary').getchildren(): |
|
150 | for elt in self.changes[rev].find('summary').getchildren(): | |
136 | if elt.tag in ('add_directory', 'remove_directory'): |
|
151 | if elt.tag in ('add_directory', 'remove_directory'): | |
137 | continue |
|
152 | continue | |
138 | if elt.tag == 'move': |
|
153 | if elt.tag == 'move': | |
139 | if man is None: |
|
154 | if man is None: | |
140 | man = self.manifest() |
|
155 | man = self.manifest() | |
141 | source, dest = elt.get('from'), elt.get('to') |
|
156 | source, dest = elt.get('from'), elt.get('to') | |
142 | if source in man: |
|
157 | if source in man: | |
143 | # File move |
|
158 | # File move | |
144 | changes.append((source, rev)) |
|
159 | changes.append((source, rev)) | |
145 | changes.append((dest, rev)) |
|
160 | changes.append((dest, rev)) | |
146 | copies[dest] = source |
|
161 | copies[dest] = source | |
147 | else: |
|
162 | else: | |
148 | # Directory move, deduce file moves from manifest |
|
163 | # Directory move, deduce file moves from manifest | |
149 | source = source + '/' |
|
164 | source = source + '/' | |
150 | for f in man: |
|
165 | for f in man: | |
151 | if not f.startswith(source): |
|
166 | if not f.startswith(source): | |
152 | continue |
|
167 | continue | |
153 | fdest = dest + '/' + f[len(source):] |
|
168 | fdest = dest + '/' + f[len(source):] | |
154 | changes.append((f, rev)) |
|
169 | changes.append((f, rev)) | |
155 | changes.append((fdest, rev)) |
|
170 | changes.append((fdest, rev)) | |
156 | copies[fdest] = f |
|
171 | copies[fdest] = f | |
157 | else: |
|
172 | else: | |
158 | changes.append((elt.text.strip(), rev)) |
|
173 | changes.append((elt.text.strip(), rev)) | |
159 | self.pull(rev) |
|
174 | self.pull(rev) | |
160 | self.lastrev = rev |
|
175 | self.lastrev = rev | |
161 | return sorted(changes), copies |
|
176 | return sorted(changes), copies | |
162 |
|
177 | |||
163 | def getfile(self, name, rev): |
|
178 | def getfile(self, name, rev): | |
164 | if rev != self.lastrev: |
|
179 | if rev != self.lastrev: | |
165 | raise util.Abort(_('internal calling inconsistency')) |
|
180 | raise util.Abort(_('internal calling inconsistency')) | |
166 | path = os.path.join(self.tmppath, name) |
|
181 | path = os.path.join(self.tmppath, name) | |
167 | data = open(path, 'rb').read() |
|
182 | data = open(path, 'rb').read() | |
168 | mode = os.lstat(path).st_mode |
|
183 | mode = os.lstat(path).st_mode | |
169 | mode = (mode & 0111) and 'x' or '' |
|
184 | mode = (mode & 0111) and 'x' or '' | |
170 | return data, mode |
|
185 | return data, mode | |
171 |
|
186 | |||
172 | def gettags(self): |
|
187 | def gettags(self): | |
173 | return self.tags |
|
188 | return self.tags |
@@ -1,677 +1,674 b'' | |||||
1 | # url.py - HTTP handling for mercurial |
|
1 | # url.py - HTTP handling for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> |
|
4 | # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> | |
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
6 | # |
|
6 | # | |
7 | # This software may be used and distributed according to the terms of the |
|
7 | # This software may be used and distributed according to the terms of the | |
8 | # GNU General Public License version 2 or any later version. |
|
8 | # GNU General Public License version 2 or any later version. | |
9 |
|
9 | |||
10 | import urllib, urllib2, urlparse, httplib, os, re, socket, cStringIO |
|
10 | import urllib, urllib2, urlparse, httplib, os, re, socket, cStringIO | |
11 | import __builtin__ |
|
11 | import __builtin__ | |
12 | from i18n import _ |
|
12 | from i18n import _ | |
13 | import keepalive, util |
|
13 | import keepalive, util | |
14 |
|
14 | |||
15 | def _urlunparse(scheme, netloc, path, params, query, fragment, url): |
|
15 | def _urlunparse(scheme, netloc, path, params, query, fragment, url): | |
16 | '''Handle cases where urlunparse(urlparse(x://)) doesn't preserve the "//"''' |
|
16 | '''Handle cases where urlunparse(urlparse(x://)) doesn't preserve the "//"''' | |
17 | result = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) |
|
17 | result = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) | |
18 | if (scheme and |
|
18 | if (scheme and | |
19 | result.startswith(scheme + ':') and |
|
19 | result.startswith(scheme + ':') and | |
20 | not result.startswith(scheme + '://') and |
|
20 | not result.startswith(scheme + '://') and | |
21 | url.startswith(scheme + '://') |
|
21 | url.startswith(scheme + '://') | |
22 | ): |
|
22 | ): | |
23 | result = scheme + '://' + result[len(scheme + ':'):] |
|
23 | result = scheme + '://' + result[len(scheme + ':'):] | |
24 | return result |
|
24 | return result | |
25 |
|
25 | |||
26 | def hidepassword(url): |
|
26 | def hidepassword(url): | |
27 | '''hide user credential in a url string''' |
|
27 | '''hide user credential in a url string''' | |
28 | scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) |
|
28 | scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) | |
29 | netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc) |
|
29 | netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc) | |
30 | return _urlunparse(scheme, netloc, path, params, query, fragment, url) |
|
30 | return _urlunparse(scheme, netloc, path, params, query, fragment, url) | |
31 |
|
31 | |||
32 | def removeauth(url): |
|
32 | def removeauth(url): | |
33 | '''remove all authentication information from a url string''' |
|
33 | '''remove all authentication information from a url string''' | |
34 | scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) |
|
34 | scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) | |
35 | netloc = netloc[netloc.find('@')+1:] |
|
35 | netloc = netloc[netloc.find('@')+1:] | |
36 | return _urlunparse(scheme, netloc, path, params, query, fragment, url) |
|
36 | return _urlunparse(scheme, netloc, path, params, query, fragment, url) | |
37 |
|
37 | |||
38 | def netlocsplit(netloc): |
|
38 | def netlocsplit(netloc): | |
39 | '''split [user[:passwd]@]host[:port] into 4-tuple.''' |
|
39 | '''split [user[:passwd]@]host[:port] into 4-tuple.''' | |
40 |
|
40 | |||
41 | a = netloc.find('@') |
|
41 | a = netloc.find('@') | |
42 | if a == -1: |
|
42 | if a == -1: | |
43 | user, passwd = None, None |
|
43 | user, passwd = None, None | |
44 | else: |
|
44 | else: | |
45 | userpass, netloc = netloc[:a], netloc[a + 1:] |
|
45 | userpass, netloc = netloc[:a], netloc[a + 1:] | |
46 | c = userpass.find(':') |
|
46 | c = userpass.find(':') | |
47 | if c == -1: |
|
47 | if c == -1: | |
48 | user, passwd = urllib.unquote(userpass), None |
|
48 | user, passwd = urllib.unquote(userpass), None | |
49 | else: |
|
49 | else: | |
50 | user = urllib.unquote(userpass[:c]) |
|
50 | user = urllib.unquote(userpass[:c]) | |
51 | passwd = urllib.unquote(userpass[c + 1:]) |
|
51 | passwd = urllib.unquote(userpass[c + 1:]) | |
52 | c = netloc.find(':') |
|
52 | c = netloc.find(':') | |
53 | if c == -1: |
|
53 | if c == -1: | |
54 | host, port = netloc, None |
|
54 | host, port = netloc, None | |
55 | else: |
|
55 | else: | |
56 | host, port = netloc[:c], netloc[c + 1:] |
|
56 | host, port = netloc[:c], netloc[c + 1:] | |
57 | return host, port, user, passwd |
|
57 | return host, port, user, passwd | |
58 |
|
58 | |||
59 | def netlocunsplit(host, port, user=None, passwd=None): |
|
59 | def netlocunsplit(host, port, user=None, passwd=None): | |
60 | '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' |
|
60 | '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' | |
61 | if port: |
|
61 | if port: | |
62 | hostport = host + ':' + port |
|
62 | hostport = host + ':' + port | |
63 | else: |
|
63 | else: | |
64 | hostport = host |
|
64 | hostport = host | |
65 | if user: |
|
65 | if user: | |
66 | quote = lambda s: urllib.quote(s, safe='') |
|
66 | quote = lambda s: urllib.quote(s, safe='') | |
67 | if passwd: |
|
67 | if passwd: | |
68 | userpass = quote(user) + ':' + quote(passwd) |
|
68 | userpass = quote(user) + ':' + quote(passwd) | |
69 | else: |
|
69 | else: | |
70 | userpass = quote(user) |
|
70 | userpass = quote(user) | |
71 | return userpass + '@' + hostport |
|
71 | return userpass + '@' + hostport | |
72 | return hostport |
|
72 | return hostport | |
73 |
|
73 | |||
_safe = ('abcdefghijklmnopqrstuvwxyz'
         'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
         '0123456789' '_.-/')
_safeset = None
_hex = None
def quotepath(path):
    '''quote the path part of a URL

    This is similar to urllib.quote, but it also tries to avoid
    quoting things twice (inspired by wget):

    >>> quotepath('abc def')
    'abc%20def'
    >>> quotepath('abc%20def')
    'abc%20def'
    >>> quotepath('abc%20 def')
    'abc%20%20def'
    >>> quotepath('abc def%20')
    'abc%20def%20'
    >>> quotepath('abc def%2')
    'abc%20def%252'
    >>> quotepath('abc def%')
    'abc%20def%25'
    '''
    global _safeset, _hex
    # build the character lookup sets lazily, once per process
    if _safeset is None:
        _safeset = set(_safe)
        _hex = set('abcdefABCDEF0123456789')
    chars = list(path)
    for idx, ch in enumerate(chars):
        # a '%' already followed by two hex digits is taken to be an
        # existing escape sequence and is left untouched
        if (ch == '%' and idx + 2 < len(chars) and
            chars[idx + 1] in _hex and chars[idx + 2] in _hex):
            continue
        if ch not in _safeset:
            chars[idx] = '%%%02X' % ord(ch)
    return ''.join(chars)
111 |
|
111 | |||
class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
    """Password manager backed by [auth] config and interactive prompts.

    Credentials are resolved in order: urllib2's built-in store, the
    best-matching [auth] configuration group, and finally an
    interactive prompt (aborting when the ui is non-interactive).
    """

    def __init__(self, ui):
        urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
        self.ui = ui

    def find_user_password(self, realm, authuri):
        """Return a (user, password) pair for realm/authuri."""
        user, passwd = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
            self, realm, authuri)
        if user and passwd:
            self._writedebug(user, passwd)
            return (user, passwd)

        # fall back to the [auth] configuration section
        if not user:
            auth = self.readauthtoken(authuri)
            if auth:
                user, passwd = auth.get('username'), auth.get('password')
        if not user or not passwd:
            if not self.ui.interactive():
                raise util.Abort(_('http authorization required'))

            self.ui.write(_("http authorization required\n"))
            self.ui.status(_("realm: %s\n") % realm)
            if user:
                self.ui.status(_("user: %s\n") % user)
            else:
                user = self.ui.prompt(_("user:"), default=None)

            if not passwd:
                passwd = self.ui.getpass()

        # remember the result so we are not asked again for this realm
        self.add_password(realm, authuri, user, passwd)
        self._writedebug(user, passwd)
        return (user, passwd)

    def _writedebug(self, user, passwd):
        # never echo the real password; show stars or 'not set'
        msg = _('http auth: user %s, password %s\n')
        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))

    def readauthtoken(self, uri):
        """Return the [auth] config group best matching uri, or None."""
        # collect [auth] entries into one dict per group name
        groups = dict()
        for key, val in self.ui.configitems('auth'):
            if '.' not in key:
                self.ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
                continue
            group, setting = key.split('.', 1)
            settings = groups.setdefault(group, dict())
            if setting in ('username', 'cert', 'key'):
                # these values may contain ~user or environment pieces
                val = util.expandpath(val)
            settings[setting] = val

        # choose the group whose prefix matches the longest part of uri
        scheme, hostpath = uri.split('://', 1)
        longest = 0
        found = None
        for candidate in groups.itervalues():
            prefix = candidate.get('prefix')
            if not prefix:
                continue
            parts = prefix.split('://', 1)
            if len(parts) > 1:
                schemes, prefix = [parts[0]], parts[1]
            else:
                schemes = (candidate.get('schemes') or 'https').split()
            if (prefix == '*' or hostpath.startswith(prefix)) and \
                len(prefix) > longest and scheme in schemes:
                longest = len(prefix)
                found = candidate
        return found
182 |
|
182 | |||
class proxyhandler(urllib2.ProxyHandler):
    """ProxyHandler driven by [http_proxy] config and environment.

    The proxy location comes from [http_proxy] host (or $http_proxy);
    credentials may be embedded in the URL or supplied via
    [http_proxy] user/passwd.  Hosts in [http_proxy] no (or
    $no_proxy) bypass the proxy entirely.
    """

    def __init__(self, ui):
        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None

        if proxyurl:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            parts = urlparse.urlsplit(proxyurl)
            proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = parts
            proxyhost, proxyport, proxyuser, proxypasswd = \
                netlocsplit(proxynetloc)
            if not proxyuser:
                proxyuser = ui.config("http_proxy", "user")
                proxypasswd = ui.config("http_proxy", "passwd")

            # see if we should use a proxy for this url
            no_list = ["localhost", "127.0.0.1"]
            no_list += [p.lower()
                        for p in ui.configlist("http_proxy", "no")]
            no_list += [p.strip().lower()
                        for p in os.getenv("no_proxy", '').split(',')
                        if p.strip()]
            # "http_proxy.always" config is for running tests on localhost
            if ui.configbool("http_proxy", "always"):
                self.no_list = []
            else:
                self.no_list = no_list

            # rebuild the proxy url with any configured credentials
            proxyurl = urlparse.urlunsplit((
                proxyscheme, netlocunsplit(proxyhost, proxyport,
                                           proxyuser, proxypasswd or ''),
                proxypath, proxyquery, proxyfrag))
            proxies = {'http': proxyurl, 'https': proxyurl}
            ui.debug('proxying through http://%s:%s\n' %
                     (proxyhost, proxyport))
        else:
            proxies = {}

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found, so drop them
        for env in ("HTTP_PROXY", "http_proxy", "no_proxy"):
            try:
                if env in os.environ:
                    del os.environ[env]
            except OSError:
                pass

        urllib2.ProxyHandler.__init__(self, proxies)
        self.ui = ui

    def proxy_open(self, req, proxy, type_):
        """Open req through the proxy unless its host is exempted."""
        host = req.get_host().split(':')[0]
        if host in self.no_list:
            return None

        # work around a bug in Python < 2.4.2
        # (it leaves a "\n" at the end of Proxy-authorization headers)
        baseclass = req.__class__
        class _request(baseclass):
            def add_header(self, key, val):
                if key.lower() == 'proxy-authorization':
                    val = val.strip()
                return baseclass.add_header(self, key, val)
        req.__class__ = _request

        return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
253 |
|
253 | |||
class httpsendfile(object):
    """This is a wrapper around the objects returned by python's "open".

    Its purpose is to send file-like objects via HTTP and, to do so, it
    defines a __len__ attribute to feed the Content-Length header.
    """

    def __init__(self, *args, **kwargs):
        # We can't just "self._data = open(*args, **kwargs)" here because
        # there is an "open" function defined in this module that shadows
        # the global one
        fp = __builtin__.open(*args, **kwargs)
        self._data = fp
        # re-export the underlying file primitives directly
        for name in ('read', 'seek', 'close', 'write'):
            setattr(self, name, getattr(fp, name))

    def __len__(self):
        # current on-disk size, so Content-Length matches what is sent
        return os.fstat(self._data.fileno()).st_size
273 |
|
273 | |||
def _gen_sendfile(connection):
    """Build a send() override for *connection* that can stream files.

    httpsendfile payloads are rewound and pushed out in chunks; any
    other payload goes straight to the base class.
    """
    def _sendfile(self, payload):
        if isinstance(payload, httpsendfile):
            # if auth required, some data sent twice, so rewind here
            payload.seek(0)
            for piece in util.filechunkiter(payload):
                connection.send(self, piece)
        else:
            connection.send(self, payload)
    return _sendfile
285 |
|
285 | |||
has_https = hasattr(urllib2, 'HTTPSHandler')
if has_https:
    try:
        # avoid using deprecated/broken FakeSocket in python 2.6
        import ssl
        _ssl_wrap_socket = ssl.wrap_socket
        CERT_REQUIRED = ssl.CERT_REQUIRED
    except ImportError:
        # pre-2.6 fallback: no real certificate verification available
        CERT_REQUIRED = 2

        def _ssl_wrap_socket(sock, key_file, cert_file,
                             cert_reqs=CERT_REQUIRED, ca_certs=None):
            if ca_certs:
                raise util.Abort(_(
                    'certificate checking requires Python 2.6'))

            wrapped = socket.ssl(sock, key_file, cert_file)
            return httplib.FakeSocket(sock, wrapped)
304 |
|
304 | |||
305 | try: |
|
305 | try: | |
306 | _create_connection = socket.create_connection |
|
306 | _create_connection = socket.create_connection | |
307 | except AttributeError: |
|
307 | except AttributeError: | |
308 | _GLOBAL_DEFAULT_TIMEOUT = object() |
|
308 | _GLOBAL_DEFAULT_TIMEOUT = object() | |
309 |
|
309 | |||
310 | def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, |
|
310 | def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, | |
311 | source_address=None): |
|
311 | source_address=None): | |
312 | # lifted from Python 2.6 |
|
312 | # lifted from Python 2.6 | |
313 |
|
313 | |||
314 | msg = "getaddrinfo returns an empty list" |
|
314 | msg = "getaddrinfo returns an empty list" | |
315 | host, port = address |
|
315 | host, port = address | |
316 | for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): |
|
316 | for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): | |
317 | af, socktype, proto, canonname, sa = res |
|
317 | af, socktype, proto, canonname, sa = res | |
318 | sock = None |
|
318 | sock = None | |
319 | try: |
|
319 | try: | |
320 | sock = socket.socket(af, socktype, proto) |
|
320 | sock = socket.socket(af, socktype, proto) | |
321 | if timeout is not _GLOBAL_DEFAULT_TIMEOUT: |
|
321 | if timeout is not _GLOBAL_DEFAULT_TIMEOUT: | |
322 | sock.settimeout(timeout) |
|
322 | sock.settimeout(timeout) | |
323 | if source_address: |
|
323 | if source_address: | |
324 | sock.bind(source_address) |
|
324 | sock.bind(source_address) | |
325 | sock.connect(sa) |
|
325 | sock.connect(sa) | |
326 | return sock |
|
326 | return sock | |
327 |
|
327 | |||
328 | except socket.error, msg: |
|
328 | except socket.error, msg: | |
329 | if sock is not None: |
|
329 | if sock is not None: | |
330 | sock.close() |
|
330 | sock.close() | |
331 |
|
331 | |||
332 | raise socket.error, msg |
|
332 | raise socket.error, msg | |
333 |
|
333 | |||
class httpconnection(keepalive.HTTPConnection):
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection)

    def connect(self):
        """Connect directly, or tunnel through a CONNECT proxy."""
        if not (has_https and self.realhostport):
            keepalive.HTTPConnection.connect(self)
            return
        # use CONNECT proxy
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        if _generic_proxytunnel(self):
            # we do not support client x509 certificates
            self.sock = _ssl_wrap_socket(self.sock, None, None)

    def getresponse(self):
        """Return a buffered failed-CONNECT response if one is pending."""
        cached = getattr(self, 'proxyres', None)
        if cached:
            if cached.will_close:
                self.close()
            self.proxyres = None
            return cached
        return keepalive.HTTPConnection.getresponse(self)
356 |
|
356 | |||
# general transaction handler to support different ways to handle
# HTTPS proxying before and after Python 2.6.3.
def _generic_start_transaction(handler, h, req):
    """Stash CONNECT-tunnel state (realhostport/headers) on connection h."""
    tunnel = getattr(req, '_tunnel_host', None)
    if tunnel:
        # Python >= 2.6.3 exposes the tunnel target directly
        if tunnel[:7] not in ['http://', 'https:/']:
            tunnel = 'https://' + tunnel
        new_tunnel = True
    else:
        # older Pythons: the selector holds the full url when proxied
        tunnel = req.get_selector()
        new_tunnel = False

    if new_tunnel or tunnel == req.get_full_url():  # has proxy
        urlparts = urlparse.urlparse(tunnel)
        if new_tunnel or urlparts[0] == 'https':  # only use CONNECT for HTTPS
            realhostport = urlparts[1]
            # append default port unless one is present ([v6] or host:port)
            if realhostport[-1] == ']' or ':' not in realhostport:
                realhostport += ':443'

            h.realhostport = realhostport
            h.headers = req.headers.copy()
            h.headers.update(handler.parent.addheaders)
            return

    # no tunnelling needed
    h.realhostport = None
    h.headers = None
383 |
|
383 | |||
def _generic_proxytunnel(self):
    """Issue a CONNECT over self.sock and parse the proxy's reply.

    Returns True when the tunnel is established (HTTP 200).  On
    failure the parsed proxy response is stored on self.proxyres and
    False is returned so the caller can surface the error.
    """
    # only the Proxy-* headers are meant for the proxy itself
    tunnelheaders = dict([(x, self.headers[x]) for x in self.headers
                          if x.lower().startswith('proxy-')])
    self._set_hostport(self.host, self.port)
    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
    for header in tunnelheaders.iteritems():
        self.send('%s: %s\r\n' % header)
    self.send('\r\n')

    # majority of the following code is duplicated from
    # httplib.HTTPConnection as there are no adequate places to
    # override functions to provide the needed functionality
    resp = self.response_class(self.sock,
                               strict=self.strict,
                               method=self._method)

    while True:
        version, status, reason = resp._read_status()
        if status != httplib.CONTINUE:
            break
        # drain the header block of an interim 100-continue reply
        while True:
            extra = resp.fp.readline().strip()
            if not extra:
                break
    resp.status = status
    resp.reason = reason.strip()

    if resp.status == 200:
        # tunnel established: discard the (empty) response headers
        while True:
            hline = resp.fp.readline()
            if hline == '\r\n':
                break
        return True

    if version == 'HTTP/1.0':
        resp.version = 10
    elif version.startswith('HTTP/1.'):
        resp.version = 11
    elif version == 'HTTP/0.9':
        resp.version = 9
    else:
        raise httplib.UnknownProtocol(version)

    if resp.version == 9:
        # HTTP/0.9 has no headers or framing at all
        resp.length = None
        resp.chunked = 0
        resp.will_close = 1
        resp.msg = httplib.HTTPMessage(cStringIO.StringIO())
        return False

    resp.msg = httplib.HTTPMessage(resp.fp)
    resp.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    trenc = resp.msg.getheader('transfer-encoding')
    if trenc and trenc.lower() == "chunked":
        resp.chunked = 1
        resp.chunk_left = None
    else:
        resp.chunked = 0

    # will the connection close at the end of the response?
    resp.will_close = resp._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
    clen = resp.msg.getheader('content-length')
    if clen and not resp.chunked:
        try:
            resp.length = int(clen)
        except ValueError:
            resp.length = None
        else:
            if resp.length < 0:  # ignore nonsensical negative lengths
                resp.length = None
    else:
        resp.length = None

    # does the body have a fixed length? (of zero)
    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
        100 <= status < 200 or  # 1xx codes
        resp._method == 'HEAD'):
        resp.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not resp.will_close and
        not resp.chunked and
        resp.length is None):
        resp.will_close = 1

    self.proxyres = resp

    return False
480 |
|
480 | |||
class httphandler(keepalive.HTTPHandler):
    """Keep-alive HTTP handler that understands CONNECT-style proxying."""

    def http_open(self, req):
        return self.do_open(httpconnection, req)

    def _start_transaction(self, h, req):
        # record any proxy tunnel state on the connection before the
        # keepalive machinery takes over
        _generic_start_transaction(self, h, req)
        return keepalive.HTTPHandler._start_transaction(self, h, req)
492 | if has_https: |
|
489 | if has_https: | |
493 | class BetterHTTPS(httplib.HTTPSConnection): |
|
490 | class BetterHTTPS(httplib.HTTPSConnection): | |
494 | send = keepalive.safesend |
|
491 | send = keepalive.safesend | |
495 |
|
492 | |||
496 | def connect(self): |
|
493 | def connect(self): | |
497 | if hasattr(self, 'ui'): |
|
494 | if hasattr(self, 'ui'): | |
498 | cacerts = self.ui.config('web', 'cacerts') |
|
495 | cacerts = self.ui.config('web', 'cacerts') | |
499 | else: |
|
496 | else: | |
500 | cacerts = None |
|
497 | cacerts = None | |
501 |
|
498 | |||
502 | if cacerts: |
|
499 | if cacerts: | |
503 | sock = _create_connection((self.host, self.port)) |
|
500 | sock = _create_connection((self.host, self.port)) | |
504 | self.sock = _ssl_wrap_socket(sock, self.key_file, |
|
501 | self.sock = _ssl_wrap_socket(sock, self.key_file, | |
505 | self.cert_file, cert_reqs=CERT_REQUIRED, |
|
502 | self.cert_file, cert_reqs=CERT_REQUIRED, | |
506 | ca_certs=cacerts) |
|
503 | ca_certs=cacerts) | |
507 | self.ui.debug(_('server identity verification succeeded\n')) |
|
504 | self.ui.debug(_('server identity verification succeeded\n')) | |
508 | else: |
|
505 | else: | |
509 | httplib.HTTPSConnection.connect(self) |
|
506 | httplib.HTTPSConnection.connect(self) | |
510 |
|
507 | |||
511 | class httpsconnection(BetterHTTPS): |
|
508 | class httpsconnection(BetterHTTPS): | |
512 | response_class = keepalive.HTTPResponse |
|
509 | response_class = keepalive.HTTPResponse | |
513 | # must be able to send big bundle as stream. |
|
510 | # must be able to send big bundle as stream. | |
514 | send = _gen_sendfile(BetterHTTPS) |
|
511 | send = _gen_sendfile(BetterHTTPS) | |
515 | getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection) |
|
512 | getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection) | |
516 |
|
513 | |||
517 | def connect(self): |
|
514 | def connect(self): | |
518 | if self.realhostport: # use CONNECT proxy |
|
515 | if self.realhostport: # use CONNECT proxy | |
519 | self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
|
516 | self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | |
520 | self.sock.connect((self.host, self.port)) |
|
517 | self.sock.connect((self.host, self.port)) | |
521 | if _generic_proxytunnel(self): |
|
518 | if _generic_proxytunnel(self): | |
522 | self.sock = _ssl_wrap_socket(self.sock, self.cert_file, |
|
519 | self.sock = _ssl_wrap_socket(self.sock, self.cert_file, | |
523 | self.key_file) |
|
520 | self.key_file) | |
524 | else: |
|
521 | else: | |
525 | BetterHTTPS.connect(self) |
|
522 | BetterHTTPS.connect(self) | |
526 |
|
523 | |||
527 | class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler): |
|
524 | class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler): | |
528 | def __init__(self, ui): |
|
525 | def __init__(self, ui): | |
529 | keepalive.KeepAliveHandler.__init__(self) |
|
526 | keepalive.KeepAliveHandler.__init__(self) | |
530 | urllib2.HTTPSHandler.__init__(self) |
|
527 | urllib2.HTTPSHandler.__init__(self) | |
531 | self.ui = ui |
|
528 | self.ui = ui | |
532 | self.pwmgr = passwordmgr(self.ui) |
|
529 | self.pwmgr = passwordmgr(self.ui) | |
533 |
|
530 | |||
534 | def _start_transaction(self, h, req): |
|
531 | def _start_transaction(self, h, req): | |
535 | _generic_start_transaction(self, h, req) |
|
532 | _generic_start_transaction(self, h, req) | |
536 | return keepalive.KeepAliveHandler._start_transaction(self, h, req) |
|
533 | return keepalive.KeepAliveHandler._start_transaction(self, h, req) | |
537 |
|
534 | |||
538 | def https_open(self, req): |
|
535 | def https_open(self, req): | |
539 | self.auth = self.pwmgr.readauthtoken(req.get_full_url()) |
|
536 | self.auth = self.pwmgr.readauthtoken(req.get_full_url()) | |
540 | return self.do_open(self._makeconnection, req) |
|
537 | return self.do_open(self._makeconnection, req) | |
541 |
|
538 | |||
542 | def _makeconnection(self, host, port=None, *args, **kwargs): |
|
539 | def _makeconnection(self, host, port=None, *args, **kwargs): | |
543 | keyfile = None |
|
540 | keyfile = None | |
544 | certfile = None |
|
541 | certfile = None | |
545 |
|
542 | |||
546 | if len(args) >= 1: # key_file |
|
543 | if len(args) >= 1: # key_file | |
547 | keyfile = args[0] |
|
544 | keyfile = args[0] | |
548 | if len(args) >= 2: # cert_file |
|
545 | if len(args) >= 2: # cert_file | |
549 | certfile = args[1] |
|
546 | certfile = args[1] | |
550 | args = args[2:] |
|
547 | args = args[2:] | |
551 |
|
548 | |||
552 | # if the user has specified different key/cert files in |
|
549 | # if the user has specified different key/cert files in | |
553 | # hgrc, we prefer these |
|
550 | # hgrc, we prefer these | |
554 | if self.auth and 'key' in self.auth and 'cert' in self.auth: |
|
551 | if self.auth and 'key' in self.auth and 'cert' in self.auth: | |
555 | keyfile = self.auth['key'] |
|
552 | keyfile = self.auth['key'] | |
556 | certfile = self.auth['cert'] |
|
553 | certfile = self.auth['cert'] | |
557 |
|
554 | |||
558 | conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs) |
|
555 | conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs) | |
559 | conn.ui = self.ui |
|
556 | conn.ui = self.ui | |
560 | return conn |
|
557 | return conn | |
561 |
|
558 | |||
562 | class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler): |
|
559 | class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler): | |
563 | def __init__(self, *args, **kwargs): |
|
560 | def __init__(self, *args, **kwargs): | |
564 | urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs) |
|
561 | urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs) | |
565 | self.retried_req = None |
|
562 | self.retried_req = None | |
566 |
|
563 | |||
567 | def reset_retry_count(self): |
|
564 | def reset_retry_count(self): | |
568 | # Python 2.6.5 will call this on 401 or 407 errors and thus loop |
|
565 | # Python 2.6.5 will call this on 401 or 407 errors and thus loop | |
569 | # forever. We disable reset_retry_count completely and reset in |
|
566 | # forever. We disable reset_retry_count completely and reset in | |
570 | # http_error_auth_reqed instead. |
|
567 | # http_error_auth_reqed instead. | |
571 | pass |
|
568 | pass | |
572 |
|
569 | |||
573 | def http_error_auth_reqed(self, auth_header, host, req, headers): |
|
570 | def http_error_auth_reqed(self, auth_header, host, req, headers): | |
574 | # Reset the retry counter once for each request. |
|
571 | # Reset the retry counter once for each request. | |
575 | if req is not self.retried_req: |
|
572 | if req is not self.retried_req: | |
576 | self.retried_req = req |
|
573 | self.retried_req = req | |
577 | self.retried = 0 |
|
574 | self.retried = 0 | |
578 | # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if |
|
575 | # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if | |
579 | # it doesn't know about the auth type requested. This can happen if |
|
576 | # it doesn't know about the auth type requested. This can happen if | |
580 | # somebody is using BasicAuth and types a bad password. |
|
577 | # somebody is using BasicAuth and types a bad password. | |
581 | try: |
|
578 | try: | |
582 | return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed( |
|
579 | return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed( | |
583 | self, auth_header, host, req, headers) |
|
580 | self, auth_header, host, req, headers) | |
584 | except ValueError, inst: |
|
581 | except ValueError, inst: | |
585 | arg = inst.args[0] |
|
582 | arg = inst.args[0] | |
586 | if arg.startswith("AbstractDigestAuthHandler doesn't know "): |
|
583 | if arg.startswith("AbstractDigestAuthHandler doesn't know "): | |
587 | return |
|
584 | return | |
588 | raise |
|
585 | raise | |
589 |
|
586 | |||
590 | class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler): |
|
587 | class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler): | |
591 | def __init__(self, *args, **kwargs): |
|
588 | def __init__(self, *args, **kwargs): | |
592 | urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs) |
|
589 | urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs) | |
593 | self.retried_req = None |
|
590 | self.retried_req = None | |
594 |
|
591 | |||
595 | def reset_retry_count(self): |
|
592 | def reset_retry_count(self): | |
596 | # Python 2.6.5 will call this on 401 or 407 errors and thus loop |
|
593 | # Python 2.6.5 will call this on 401 or 407 errors and thus loop | |
597 | # forever. We disable reset_retry_count completely and reset in |
|
594 | # forever. We disable reset_retry_count completely and reset in | |
598 | # http_error_auth_reqed instead. |
|
595 | # http_error_auth_reqed instead. | |
599 | pass |
|
596 | pass | |
600 |
|
597 | |||
601 | def http_error_auth_reqed(self, auth_header, host, req, headers): |
|
598 | def http_error_auth_reqed(self, auth_header, host, req, headers): | |
602 | # Reset the retry counter once for each request. |
|
599 | # Reset the retry counter once for each request. | |
603 | if req is not self.retried_req: |
|
600 | if req is not self.retried_req: | |
604 | self.retried_req = req |
|
601 | self.retried_req = req | |
605 | self.retried = 0 |
|
602 | self.retried = 0 | |
606 | return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed( |
|
603 | return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed( | |
607 | self, auth_header, host, req, headers) |
|
604 | self, auth_header, host, req, headers) | |
608 |
|
605 | |||
609 | def getauthinfo(path): |
|
606 | def getauthinfo(path): | |
610 | scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) |
|
607 | scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) | |
611 | if not urlpath: |
|
608 | if not urlpath: | |
612 | urlpath = '/' |
|
609 | urlpath = '/' | |
613 | if scheme != 'file': |
|
610 | if scheme != 'file': | |
614 | # XXX: why are we quoting the path again with some smart |
|
611 | # XXX: why are we quoting the path again with some smart | |
615 | # heuristic here? Anyway, it cannot be done with file:// |
|
612 | # heuristic here? Anyway, it cannot be done with file:// | |
616 | # urls since path encoding is os/fs dependent (see |
|
613 | # urls since path encoding is os/fs dependent (see | |
617 | # urllib.pathname2url() for details). |
|
614 | # urllib.pathname2url() for details). | |
618 | urlpath = quotepath(urlpath) |
|
615 | urlpath = quotepath(urlpath) | |
619 | host, port, user, passwd = netlocsplit(netloc) |
|
616 | host, port, user, passwd = netlocsplit(netloc) | |
620 |
|
617 | |||
621 | # urllib cannot handle URLs with embedded user or passwd |
|
618 | # urllib cannot handle URLs with embedded user or passwd | |
622 | url = urlparse.urlunsplit((scheme, netlocunsplit(host, port), |
|
619 | url = urlparse.urlunsplit((scheme, netlocunsplit(host, port), | |
623 | urlpath, query, frag)) |
|
620 | urlpath, query, frag)) | |
624 | if user: |
|
621 | if user: | |
625 | netloc = host |
|
622 | netloc = host | |
626 | if port: |
|
623 | if port: | |
627 | netloc += ':' + port |
|
624 | netloc += ':' + port | |
628 | # Python < 2.4.3 uses only the netloc to search for a password |
|
625 | # Python < 2.4.3 uses only the netloc to search for a password | |
629 | authinfo = (None, (url, netloc), user, passwd or '') |
|
626 | authinfo = (None, (url, netloc), user, passwd or '') | |
630 | else: |
|
627 | else: | |
631 | authinfo = None |
|
628 | authinfo = None | |
632 | return url, authinfo |
|
629 | return url, authinfo | |
633 |
|
630 | |||
634 | handlerfuncs = [] |
|
631 | handlerfuncs = [] | |
635 |
|
632 | |||
636 | def opener(ui, authinfo=None): |
|
633 | def opener(ui, authinfo=None): | |
637 | ''' |
|
634 | ''' | |
638 | construct an opener suitable for urllib2 |
|
635 | construct an opener suitable for urllib2 | |
639 | authinfo will be added to the password manager |
|
636 | authinfo will be added to the password manager | |
640 | ''' |
|
637 | ''' | |
641 | handlers = [httphandler()] |
|
638 | handlers = [httphandler()] | |
642 | if has_https: |
|
639 | if has_https: | |
643 | handlers.append(httpshandler(ui)) |
|
640 | handlers.append(httpshandler(ui)) | |
644 |
|
641 | |||
645 | handlers.append(proxyhandler(ui)) |
|
642 | handlers.append(proxyhandler(ui)) | |
646 |
|
643 | |||
647 | passmgr = passwordmgr(ui) |
|
644 | passmgr = passwordmgr(ui) | |
648 | if authinfo is not None: |
|
645 | if authinfo is not None: | |
649 | passmgr.add_password(*authinfo) |
|
646 | passmgr.add_password(*authinfo) | |
650 | user, passwd = authinfo[2:4] |
|
647 | user, passwd = authinfo[2:4] | |
651 | ui.debug('http auth: user %s, password %s\n' % |
|
648 | ui.debug('http auth: user %s, password %s\n' % | |
652 | (user, passwd and '*' * len(passwd) or 'not set')) |
|
649 | (user, passwd and '*' * len(passwd) or 'not set')) | |
653 |
|
650 | |||
654 | handlers.extend((httpbasicauthhandler(passmgr), |
|
651 | handlers.extend((httpbasicauthhandler(passmgr), | |
655 | httpdigestauthhandler(passmgr))) |
|
652 | httpdigestauthhandler(passmgr))) | |
656 | handlers.extend([h(ui, passmgr) for h in handlerfuncs]) |
|
653 | handlers.extend([h(ui, passmgr) for h in handlerfuncs]) | |
657 | opener = urllib2.build_opener(*handlers) |
|
654 | opener = urllib2.build_opener(*handlers) | |
658 |
|
655 | |||
659 | # 1.0 here is the _protocol_ version |
|
656 | # 1.0 here is the _protocol_ version | |
660 | opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] |
|
657 | opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] | |
661 | opener.addheaders.append(('Accept', 'application/mercurial-0.1')) |
|
658 | opener.addheaders.append(('Accept', 'application/mercurial-0.1')) | |
662 | return opener |
|
659 | return opener | |
663 |
|
660 | |||
664 | scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') |
|
661 | scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') | |
665 |
|
662 | |||
666 | def open(ui, url, data=None): |
|
663 | def open(ui, url, data=None): | |
667 | scheme = None |
|
664 | scheme = None | |
668 | m = scheme_re.search(url) |
|
665 | m = scheme_re.search(url) | |
669 | if m: |
|
666 | if m: | |
670 | scheme = m.group(1).lower() |
|
667 | scheme = m.group(1).lower() | |
671 | if not scheme: |
|
668 | if not scheme: | |
672 | path = util.normpath(os.path.abspath(url)) |
|
669 | path = util.normpath(os.path.abspath(url)) | |
673 | url = 'file://' + urllib.pathname2url(path) |
|
670 | url = 'file://' + urllib.pathname2url(path) | |
674 | authinfo = None |
|
671 | authinfo = None | |
675 | else: |
|
672 | else: | |
676 | url, authinfo = getauthinfo(url) |
|
673 | url, authinfo = getauthinfo(url) | |
677 | return opener(ui, authinfo).open(url, data) |
|
674 | return opener(ui, authinfo).open(url, data) |
@@ -1,60 +1,87 b'' | |||||
1 | $ echo "[extensions]" >> $HGRCPATH |
|
1 | $ echo "[extensions]" >> $HGRCPATH | |
2 | $ echo "bookmarks=" >> $HGRCPATH |
|
2 | $ echo "bookmarks=" >> $HGRCPATH | |
3 | $ echo "mq=" >> $HGRCPATH |
|
3 | $ echo "mq=" >> $HGRCPATH | |
4 |
|
4 | |||
5 | $ hg init |
|
5 | $ hg init | |
6 |
|
6 | |||
7 | $ echo qqq>qqq.txt |
|
7 | $ echo qqq>qqq.txt | |
8 |
|
8 | |||
9 | add file |
|
9 | add file | |
10 |
|
10 | |||
11 | $ hg add |
|
11 | $ hg add | |
12 | adding qqq.txt |
|
12 | adding qqq.txt | |
13 |
|
13 | |||
14 | commit first revision |
|
14 | commit first revision | |
15 |
|
15 | |||
16 | $ hg ci -m 1 |
|
16 | $ hg ci -m 1 | |
17 |
|
17 | |||
18 | set bookmark |
|
18 | set bookmark | |
19 |
|
19 | |||
20 | $ hg book test |
|
20 | $ hg book test | |
21 |
|
21 | |||
22 | $ echo www>>qqq.txt |
|
22 | $ echo www>>qqq.txt | |
23 |
|
23 | |||
24 | commit second revision |
|
24 | commit second revision | |
25 |
|
25 | |||
26 | $ hg ci -m 2 |
|
26 | $ hg ci -m 2 | |
27 |
|
27 | |||
28 | set bookmark |
|
28 | set bookmark | |
29 |
|
29 | |||
30 | $ hg book test2 |
|
30 | $ hg book test2 | |
31 |
|
31 | |||
32 | update to -2 |
|
32 | update to -2 | |
33 |
|
33 | |||
34 | $ hg update -r -2 |
|
34 | $ hg update -r -2 | |
35 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
35 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
36 |
|
36 | |||
37 | $ echo eee>>qqq.txt |
|
37 | $ echo eee>>qqq.txt | |
38 |
|
38 | |||
39 | commit new head |
|
39 | commit new head | |
40 |
|
40 | |||
41 | $ hg ci -m 3 |
|
41 | $ hg ci -m 3 | |
42 | created new head |
|
42 | created new head | |
43 |
|
43 | |||
44 | bookmarks updated? |
|
44 | bookmarks updated? | |
45 |
|
45 | |||
46 | $ hg book |
|
46 | $ hg book | |
47 | test 1:25e1ee7a0081 |
|
47 | test 1:25e1ee7a0081 | |
48 | test2 1:25e1ee7a0081 |
|
48 | test2 1:25e1ee7a0081 | |
49 |
|
49 | |||
50 | strip to revision 1 |
|
50 | strip to revision 1 | |
51 |
|
51 | |||
52 | $ hg strip 1 |
|
52 | $ hg strip 1 | |
53 | saved backup bundle to * (glob) |
|
53 | saved backup bundle to * (glob) | |
54 |
|
54 | |||
55 | list bookmarks |
|
55 | list bookmarks | |
56 |
|
56 | |||
57 | $ hg book |
|
57 | $ hg book | |
58 | * test 1:8cf31af87a2b |
|
58 | * test 1:8cf31af87a2b | |
59 | * test2 1:8cf31af87a2b |
|
59 | * test2 1:8cf31af87a2b | |
60 |
|
60 | |||
|
61 | immediate rollback and reentrancy issue | |||
|
62 | ||||
|
63 | $ echo "mq=!" >> $HGRCPATH | |||
|
64 | $ hg init repo | |||
|
65 | $ cd repo | |||
|
66 | $ echo a > a | |||
|
67 | $ hg ci -Am adda | |||
|
68 | adding a | |||
|
69 | $ echo b > b | |||
|
70 | $ hg ci -Am addb | |||
|
71 | adding b | |||
|
72 | $ hg bookmarks markb | |||
|
73 | $ hg rollback | |||
|
74 | rolling back to revision 0 (undo commit) | |||
|
75 | ||||
|
76 | are you there? | |||
|
77 | ||||
|
78 | $ hg bookmarks | |||
|
79 | no bookmarks set | |||
|
80 | ||||
|
81 | can you be added again? | |||
|
82 | ||||
|
83 | $ hg bookmarks markb | |||
|
84 | $ hg bookmarks | |||
|
85 | * markb 0:07f494440405 | |||
|
86 | $ cd .. | |||
|
87 |
@@ -1,80 +1,83 b'' | |||||
1 | #!/bin/sh |
|
1 | #!/bin/sh | |
2 |
|
2 | |||
3 | "$TESTDIR/hghave" darcs || exit 80 |
|
3 | "$TESTDIR/hghave" darcs || exit 80 | |
4 |
|
4 | |||
5 | echo "[extensions]" >> $HGRCPATH |
|
5 | echo "[extensions]" >> $HGRCPATH | |
6 | echo "convert=" >> $HGRCPATH |
|
6 | echo "convert=" >> $HGRCPATH | |
7 | echo 'graphlog =' >> $HGRCPATH |
|
7 | echo 'graphlog =' >> $HGRCPATH | |
8 |
|
8 | |||
9 | DARCS_EMAIL='test@example.org'; export DARCS_EMAIL |
|
9 | DARCS_EMAIL='test@example.org'; export DARCS_EMAIL | |
10 | HOME=`pwd`/do_not_use_HOME_darcs; export HOME |
|
10 | HOME=`pwd`/do_not_use_HOME_darcs; export HOME | |
11 |
|
11 | |||
12 | # skip if we can't import elementtree |
|
12 | # skip if we can't import elementtree | |
13 | mkdir dummy |
|
13 | mkdir dummy | |
14 | mkdir dummy/_darcs |
|
14 | mkdir dummy/_darcs | |
15 | if hg convert dummy 2>&1 | grep ElementTree > /dev/null; then |
|
15 | if hg convert dummy 2>&1 | grep ElementTree > /dev/null; then | |
16 | echo 'skipped: missing feature: elementtree module' |
|
16 | echo 'skipped: missing feature: elementtree module' | |
17 | exit 80 |
|
17 | exit 80 | |
18 | fi |
|
18 | fi | |
19 |
|
19 | |||
|
20 | echo '% try converting darcs1 repository' | |||
|
21 | hg convert -s darcs "$TESTDIR/darcs/darcs1" 2>&1 | grep darcs-1.0 | |||
|
22 | ||||
20 | echo % initialize darcs repo |
|
23 | echo % initialize darcs repo | |
21 | mkdir darcs-repo |
|
24 | mkdir darcs-repo | |
22 | cd darcs-repo |
|
25 | cd darcs-repo | |
23 | darcs init |
|
26 | darcs init | |
24 | echo a > a |
|
27 | echo a > a | |
25 | darcs record -a -l -m p0 |
|
28 | darcs record -a -l -m p0 | |
26 | cd .. |
|
29 | cd .. | |
27 |
|
30 | |||
28 | echo % branch and update |
|
31 | echo % branch and update | |
29 | darcs get darcs-repo darcs-clone >/dev/null |
|
32 | darcs get darcs-repo darcs-clone >/dev/null | |
30 | cd darcs-clone |
|
33 | cd darcs-clone | |
31 | echo c >> a |
|
34 | echo c >> a | |
32 | echo c > c |
|
35 | echo c > c | |
33 | darcs record -a -l -m p1.1 |
|
36 | darcs record -a -l -m p1.1 | |
34 | cd .. |
|
37 | cd .. | |
35 |
|
38 | |||
36 | echo % update source |
|
39 | echo % update source | |
37 | cd darcs-repo |
|
40 | cd darcs-repo | |
38 | echo b >> a |
|
41 | echo b >> a | |
39 | echo b > b |
|
42 | echo b > b | |
40 | darcs record -a -l -m p1.2 |
|
43 | darcs record -a -l -m p1.2 | |
41 |
|
44 | |||
42 | echo % merge branch |
|
45 | echo % merge branch | |
43 | darcs pull -a ../darcs-clone |
|
46 | darcs pull -a ../darcs-clone | |
44 | sleep 1 |
|
47 | sleep 1 | |
45 | echo e > a |
|
48 | echo e > a | |
46 | echo f > f |
|
49 | echo f > f | |
47 | mkdir dir |
|
50 | mkdir dir | |
48 | echo d > dir/d |
|
51 | echo d > dir/d | |
49 | echo d > dir/d2 |
|
52 | echo d > dir/d2 | |
50 | darcs record -a -l -m p2 |
|
53 | darcs record -a -l -m p2 | |
51 |
|
54 | |||
52 | echo % test file and directory move |
|
55 | echo % test file and directory move | |
53 | darcs mv f ff |
|
56 | darcs mv f ff | |
54 | # Test remove + move |
|
57 | # Test remove + move | |
55 | darcs remove dir/d2 |
|
58 | darcs remove dir/d2 | |
56 | rm dir/d2 |
|
59 | rm dir/d2 | |
57 | darcs mv dir dir2 |
|
60 | darcs mv dir dir2 | |
58 | darcs record -a -l -m p3 |
|
61 | darcs record -a -l -m p3 | |
59 |
|
62 | |||
60 | echo % test utf-8 commit message and author |
|
63 | echo % test utf-8 commit message and author | |
61 | echo g > g |
|
64 | echo g > g | |
62 | # darcs is encoding agnostic, so it takes whatever bytes it's given |
|
65 | # darcs is encoding agnostic, so it takes whatever bytes it's given | |
63 | darcs record -a -l -m 'p4: desc ñ' -A 'author ñ' |
|
66 | darcs record -a -l -m 'p4: desc ñ' -A 'author ñ' | |
64 |
|
67 | |||
65 | glog() |
|
68 | glog() | |
66 | { |
|
69 | { | |
67 | HGENCODING=utf-8 hg glog --template '{rev} "{desc|firstline}" ({author}) files: {files}\n' "$@" |
|
70 | HGENCODING=utf-8 hg glog --template '{rev} "{desc|firstline}" ({author}) files: {files}\n' "$@" | |
68 | } |
|
71 | } | |
69 |
|
72 | |||
70 | cd .. |
|
73 | cd .. | |
71 | hg convert darcs-repo darcs-repo-hg |
|
74 | hg convert darcs-repo darcs-repo-hg | |
72 | # The converter does not currently handle patch conflicts very well. |
|
75 | # The converter does not currently handle patch conflicts very well. | |
73 | # When they occur, it reverts *all* changes and moves forward, |
|
76 | # When they occur, it reverts *all* changes and moves forward, | |
74 | # letting the conflict resolving patch fix collisions. |
|
77 | # letting the conflict resolving patch fix collisions. | |
75 | # Unfortunately, non-conflicting changes, like the addition of the |
|
78 | # Unfortunately, non-conflicting changes, like the addition of the | |
76 | # "c" file in p1.1 patch are reverted too. |
|
79 | # "c" file in p1.1 patch are reverted too. | |
77 | # Just to say that manifest not listing "c" here is a bug. |
|
80 | # Just to say that manifest not listing "c" here is a bug. | |
78 | glog -R darcs-repo-hg |
|
81 | glog -R darcs-repo-hg | |
79 | hg up -q -R darcs-repo-hg |
|
82 | hg up -q -R darcs-repo-hg | |
80 | hg -R darcs-repo-hg manifest --debug |
|
83 | hg -R darcs-repo-hg manifest --debug |
@@ -1,43 +1,45 b'' | |||||
|
1 | % try converting darcs1 repository | |||
|
2 | darcs-1.0 repository format is unsupported, please upgrade | |||
1 | % initialize darcs repo |
|
3 | % initialize darcs repo | |
2 | Finished recording patch 'p0' |
|
4 | Finished recording patch 'p0' | |
3 | % branch and update |
|
5 | % branch and update | |
4 | Finished recording patch 'p1.1' |
|
6 | Finished recording patch 'p1.1' | |
5 | % update source |
|
7 | % update source | |
6 | Finished recording patch 'p1.2' |
|
8 | Finished recording patch 'p1.2' | |
7 | % merge branch |
|
9 | % merge branch | |
8 | Backing up ./a(-darcs-backup0) |
|
10 | Backing up ./a(-darcs-backup0) | |
9 | We have conflicts in the following files: |
|
11 | We have conflicts in the following files: | |
10 | ./a |
|
12 | ./a | |
11 | Finished pulling and applying. |
|
13 | Finished pulling and applying. | |
12 | Finished recording patch 'p2' |
|
14 | Finished recording patch 'p2' | |
13 | % test file and directory move |
|
15 | % test file and directory move | |
14 | Finished recording patch 'p3' |
|
16 | Finished recording patch 'p3' | |
15 | % test utf-8 commit message and author |
|
17 | % test utf-8 commit message and author | |
16 | Finished recording patch 'p4: desc ñ' |
|
18 | Finished recording patch 'p4: desc ñ' | |
17 | initializing destination darcs-repo-hg repository |
|
19 | initializing destination darcs-repo-hg repository | |
18 | scanning source... |
|
20 | scanning source... | |
19 | sorting... |
|
21 | sorting... | |
20 | converting... |
|
22 | converting... | |
21 | 5 p0 |
|
23 | 5 p0 | |
22 | 4 p1.2 |
|
24 | 4 p1.2 | |
23 | 3 p1.1 |
|
25 | 3 p1.1 | |
24 | 2 p2 |
|
26 | 2 p2 | |
25 | 1 p3 |
|
27 | 1 p3 | |
26 | 0 p4: desc ? |
|
28 | 0 p4: desc ? | |
27 | o 5 "p4: desc ñ" (author ñ) files: g |
|
29 | o 5 "p4: desc ñ" (author ñ) files: g | |
28 | | |
|
30 | | | |
29 | o 4 "p3" (test@example.org) files: dir/d dir/d2 dir2/d f ff |
|
31 | o 4 "p3" (test@example.org) files: dir/d dir/d2 dir2/d f ff | |
30 | | |
|
32 | | | |
31 | o 3 "p2" (test@example.org) files: a dir/d dir/d2 f |
|
33 | o 3 "p2" (test@example.org) files: a dir/d dir/d2 f | |
32 | | |
|
34 | | | |
33 | o 2 "p1.1" (test@example.org) files: |
|
35 | o 2 "p1.1" (test@example.org) files: | |
34 | | |
|
36 | | | |
35 | o 1 "p1.2" (test@example.org) files: a b |
|
37 | o 1 "p1.2" (test@example.org) files: a b | |
36 | | |
|
38 | | | |
37 | o 0 "p0" (test@example.org) files: a |
|
39 | o 0 "p0" (test@example.org) files: a | |
38 |
|
40 | |||
39 | 7225b30cdf38257d5cc7780772c051b6f33e6d6b 644 a |
|
41 | 7225b30cdf38257d5cc7780772c051b6f33e6d6b 644 a | |
40 | 1e88685f5ddec574a34c70af492f95b6debc8741 644 b |
|
42 | 1e88685f5ddec574a34c70af492f95b6debc8741 644 b | |
41 | 37406831adc447ec2385014019599dfec953c806 644 dir2/d |
|
43 | 37406831adc447ec2385014019599dfec953c806 644 dir2/d | |
42 | b783a337463792a5c7d548ad85a7d3253c16ba8c 644 ff |
|
44 | b783a337463792a5c7d548ad85a7d3253c16ba8c 644 ff | |
43 | 0973eb1b2ecc4de7fafe7447ce1b7462108b4848 644 g |
|
45 | 0973eb1b2ecc4de7fafe7447ce1b7462108b4848 644 g |
General Comments 0
You need to be logged in to leave comments.
Login now