Show More
@@ -0,0 +1,1 b'' | |||
|
1 | test@test.com No newline at end of file |
@@ -0,0 +1,59 b'' | |||
|
1 | # Binary file regexps: | |
|
2 | \.png$ | |
|
3 | \.PNG$ | |
|
4 | \.gz$ | |
|
5 | \.GZ$ | |
|
6 | \.pdf$ | |
|
7 | \.PDF$ | |
|
8 | \.jpg$ | |
|
9 | \.JPG$ | |
|
10 | \.jpeg$ | |
|
11 | \.JPEG$ | |
|
12 | \.gif$ | |
|
13 | \.GIF$ | |
|
14 | \.tif$ | |
|
15 | \.TIF$ | |
|
16 | \.tiff$ | |
|
17 | \.TIFF$ | |
|
18 | \.pnm$ | |
|
19 | \.PNM$ | |
|
20 | \.pbm$ | |
|
21 | \.PBM$ | |
|
22 | \.pgm$ | |
|
23 | \.PGM$ | |
|
24 | \.ppm$ | |
|
25 | \.PPM$ | |
|
26 | \.bmp$ | |
|
27 | \.BMP$ | |
|
28 | \.mng$ | |
|
29 | \.MNG$ | |
|
30 | \.tar$ | |
|
31 | \.TAR$ | |
|
32 | \.bz2$ | |
|
33 | \.BZ2$ | |
|
34 | \.z$ | |
|
35 | \.Z$ | |
|
36 | \.zip$ | |
|
37 | \.ZIP$ | |
|
38 | \.jar$ | |
|
39 | \.JAR$ | |
|
40 | \.so$ | |
|
41 | \.SO$ | |
|
42 | \.a$ | |
|
43 | \.A$ | |
|
44 | \.tgz$ | |
|
45 | \.TGZ$ | |
|
46 | \.mpg$ | |
|
47 | \.MPG$ | |
|
48 | \.mpeg$ | |
|
49 | \.MPEG$ | |
|
50 | \.iso$ | |
|
51 | \.ISO$ | |
|
52 | \.exe$ | |
|
53 | \.EXE$ | |
|
54 | \.doc$ | |
|
55 | \.DOC$ | |
|
56 | \.elc$ | |
|
57 | \.ELC$ | |
|
58 | \.pyc$ | |
|
59 | \.PYC$ |
@@ -0,0 +1,49 b'' | |||
|
1 | # Boring file regexps: | |
|
2 | \.hi$ | |
|
3 | \.hi-boot$ | |
|
4 | \.o-boot$ | |
|
5 | \.o$ | |
|
6 | \.o\.cmd$ | |
|
7 | # *.ko files aren't boring by default because they might | |
|
8 | # be Korean translations rather than kernel modules. | |
|
9 | # \.ko$ | |
|
10 | \.ko\.cmd$ | |
|
11 | \.mod\.c$ | |
|
12 | (^|/)\.tmp_versions($|/) | |
|
13 | (^|/)CVS($|/) | |
|
14 | \.cvsignore$ | |
|
15 | ^\.# | |
|
16 | (^|/)RCS($|/) | |
|
17 | ,v$ | |
|
18 | (^|/)\.svn($|/) | |
|
19 | \.bzr$ | |
|
20 | (^|/)SCCS($|/) | |
|
21 | ~$ | |
|
22 | (^|/)_darcs($|/) | |
|
23 | \.bak$ | |
|
24 | \.BAK$ | |
|
25 | \.orig$ | |
|
26 | \.rej$ | |
|
27 | (^|/)vssver\.scc$ | |
|
28 | \.swp$ | |
|
29 | (^|/)MT($|/) | |
|
30 | (^|/)\{arch\}($|/) | |
|
31 | (^|/).arch-ids($|/) | |
|
32 | (^|/), | |
|
33 | \.prof$ | |
|
34 | (^|/)\.DS_Store$ | |
|
35 | (^|/)BitKeeper($|/) | |
|
36 | (^|/)ChangeSet($|/) | |
|
37 | \.py[co]$ | |
|
38 | \.elc$ | |
|
39 | \.class$ | |
|
40 | \# | |
|
41 | (^|/)Thumbs\.db$ | |
|
42 | (^|/)autom4te\.cache($|/) | |
|
43 | (^|/)config\.(log|status)$ | |
|
44 | ^\.depend$ | |
|
45 | (^|/)(tags|TAGS)$ | |
|
46 | #(^|/)\.[^/] | |
|
47 | (^|/|\.)core$ | |
|
48 | \.(obj|a|exe|so|lo|la)$ | |
|
49 | ^\.darcs-temp-mail$ |
|
1 | NO CONTENT: new file 100644 |
@@ -0,0 +1,1 b'' | |||
|
1 | a |
@@ -0,0 +1,1 b'' | |||
|
1 | a |
@@ -1,545 +1,549 b'' | |||
|
1 | 1 | # Mercurial extension to provide the 'hg bookmark' command |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008 David Soria Parra <dsp@php.net> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''track a line of development with movable markers |
|
9 | 9 | |
|
10 | 10 | Bookmarks are local movable markers to changesets. Every bookmark |
|
11 | 11 | points to a changeset identified by its hash. If you commit a |
|
12 | 12 | changeset that is based on a changeset that has a bookmark on it, the |
|
13 | 13 | bookmark shifts to the new changeset. |
|
14 | 14 | |
|
15 | 15 | It is possible to use bookmark names in every revision lookup (e.g. |
|
16 | 16 | :hg:`merge`, :hg:`update`). |
|
17 | 17 | |
|
18 | 18 | By default, when several bookmarks point to the same changeset, they |
|
19 | 19 | will all move forward together. It is possible to obtain a more |
|
20 | 20 | git-like experience by adding the following configuration option to |
|
21 | 21 | your configuration file:: |
|
22 | 22 | |
|
23 | 23 | [bookmarks] |
|
24 | 24 | track.current = True |
|
25 | 25 | |
|
26 | 26 | This will cause Mercurial to track the bookmark that you are currently |
|
27 | 27 | using, and only update it. This is similar to git's approach to |
|
28 | 28 | branching. |
|
29 | 29 | ''' |
|
30 | 30 | |
|
31 | 31 | from mercurial.i18n import _ |
|
32 | 32 | from mercurial.node import nullid, nullrev, hex, short |
|
33 | 33 | from mercurial import util, commands, repair, extensions, pushkey, hg, url |
|
34 | 34 | import os |
|
35 | 35 | |
|
36 | 36 | def write(repo): |
|
37 | 37 | '''Write bookmarks |
|
38 | 38 | |
|
39 | 39 | Write the given bookmark => hash dictionary to the .hg/bookmarks file |
|
40 | 40 | in a format equal to those of localtags. |
|
41 | 41 | |
|
42 | 42 | We also store a backup of the previous state in undo.bookmarks that |
|
43 | 43 | can be copied back on rollback. |
|
44 | 44 | ''' |
|
45 | 45 | refs = repo._bookmarks |
|
46 | 46 | if os.path.exists(repo.join('bookmarks')): |
|
47 | 47 | util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks')) |
|
48 | 48 | if repo._bookmarkcurrent not in refs: |
|
49 | 49 | setcurrent(repo, None) |
|
50 | 50 | wlock = repo.wlock() |
|
51 | 51 | try: |
|
52 | 52 | file = repo.opener('bookmarks', 'w', atomictemp=True) |
|
53 | 53 | for refspec, node in refs.iteritems(): |
|
54 | 54 | file.write("%s %s\n" % (hex(node), refspec)) |
|
55 | 55 | file.rename() |
|
56 | 56 | |
|
57 | 57 | # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) |
|
58 | 58 | try: |
|
59 | 59 | os.utime(repo.sjoin('00changelog.i'), None) |
|
60 | 60 | except OSError: |
|
61 | 61 | pass |
|
62 | 62 | |
|
63 | 63 | finally: |
|
64 | 64 | wlock.release() |
|
65 | 65 | |
|
66 | 66 | def setcurrent(repo, mark): |
|
67 | 67 | '''Set the name of the bookmark that we are currently on |
|
68 | 68 | |
|
69 | 69 | Set the name of the bookmark that we are on (hg update <bookmark>). |
|
70 | 70 | The name is recorded in .hg/bookmarks.current |
|
71 | 71 | ''' |
|
72 | 72 | current = repo._bookmarkcurrent |
|
73 | 73 | if current == mark: |
|
74 | 74 | return |
|
75 | 75 | |
|
76 | 76 | refs = repo._bookmarks |
|
77 | 77 | |
|
78 | 78 | # do not update if we do update to a rev equal to the current bookmark |
|
79 | 79 | if (mark and mark not in refs and |
|
80 | 80 | current and refs[current] == repo.changectx('.').node()): |
|
81 | 81 | return |
|
82 | 82 | if mark not in refs: |
|
83 | 83 | mark = '' |
|
84 | 84 | wlock = repo.wlock() |
|
85 | 85 | try: |
|
86 | 86 | file = repo.opener('bookmarks.current', 'w', atomictemp=True) |
|
87 | 87 | file.write(mark) |
|
88 | 88 | file.rename() |
|
89 | 89 | finally: |
|
90 | 90 | wlock.release() |
|
91 | 91 | repo._bookmarkcurrent = mark |
|
92 | 92 | |
|
93 | 93 | def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None): |
|
94 | 94 | '''track a line of development with movable markers |
|
95 | 95 | |
|
96 | 96 | Bookmarks are pointers to certain commits that move when |
|
97 | 97 | committing. Bookmarks are local. They can be renamed, copied and |
|
98 | 98 | deleted. It is possible to use bookmark names in :hg:`merge` and |
|
99 | 99 | :hg:`update` to merge and update respectively to a given bookmark. |
|
100 | 100 | |
|
101 | 101 | You can use :hg:`bookmark NAME` to set a bookmark on the working |
|
102 | 102 | directory's parent revision with the given name. If you specify |
|
103 | 103 | a revision using -r REV (where REV may be an existing bookmark), |
|
104 | 104 | the bookmark is assigned to that revision. |
|
105 | 105 | ''' |
|
106 | 106 | hexfn = ui.debugflag and hex or short |
|
107 | 107 | marks = repo._bookmarks |
|
108 | 108 | cur = repo.changectx('.').node() |
|
109 | 109 | |
|
110 | 110 | if rename: |
|
111 | 111 | if rename not in marks: |
|
112 | 112 | raise util.Abort(_("a bookmark of this name does not exist")) |
|
113 | 113 | if mark in marks and not force: |
|
114 | 114 | raise util.Abort(_("a bookmark of the same name already exists")) |
|
115 | 115 | if mark is None: |
|
116 | 116 | raise util.Abort(_("new bookmark name required")) |
|
117 | 117 | marks[mark] = marks[rename] |
|
118 | 118 | del marks[rename] |
|
119 | 119 | if repo._bookmarkcurrent == rename: |
|
120 | 120 | setcurrent(repo, mark) |
|
121 | 121 | write(repo) |
|
122 | 122 | return |
|
123 | 123 | |
|
124 | 124 | if delete: |
|
125 | 125 | if mark is None: |
|
126 | 126 | raise util.Abort(_("bookmark name required")) |
|
127 | 127 | if mark not in marks: |
|
128 | 128 | raise util.Abort(_("a bookmark of this name does not exist")) |
|
129 | 129 | if mark == repo._bookmarkcurrent: |
|
130 | 130 | setcurrent(repo, None) |
|
131 | 131 | del marks[mark] |
|
132 | 132 | write(repo) |
|
133 | 133 | return |
|
134 | 134 | |
|
135 | 135 | if mark != None: |
|
136 | 136 | if "\n" in mark: |
|
137 | 137 | raise util.Abort(_("bookmark name cannot contain newlines")) |
|
138 | 138 | mark = mark.strip() |
|
139 | 139 | if not mark: |
|
140 | 140 | raise util.Abort(_("bookmark names cannot consist entirely of " |
|
141 | 141 | "whitespace")) |
|
142 | 142 | if mark in marks and not force: |
|
143 | 143 | raise util.Abort(_("a bookmark of the same name already exists")) |
|
144 | 144 | if ((mark in repo.branchtags() or mark == repo.dirstate.branch()) |
|
145 | 145 | and not force): |
|
146 | 146 | raise util.Abort( |
|
147 | 147 | _("a bookmark cannot have the name of an existing branch")) |
|
148 | 148 | if rev: |
|
149 | 149 | marks[mark] = repo.lookup(rev) |
|
150 | 150 | else: |
|
151 | 151 | marks[mark] = repo.changectx('.').node() |
|
152 | 152 | setcurrent(repo, mark) |
|
153 | 153 | write(repo) |
|
154 | 154 | return |
|
155 | 155 | |
|
156 | 156 | if mark is None: |
|
157 | 157 | if rev: |
|
158 | 158 | raise util.Abort(_("bookmark name required")) |
|
159 | 159 | if len(marks) == 0: |
|
160 | 160 | ui.status(_("no bookmarks set\n")) |
|
161 | 161 | else: |
|
162 | 162 | for bmark, n in marks.iteritems(): |
|
163 | 163 | if ui.configbool('bookmarks', 'track.current'): |
|
164 | 164 | current = repo._bookmarkcurrent |
|
165 | 165 | if bmark == current and n == cur: |
|
166 | 166 | prefix, label = '*', 'bookmarks.current' |
|
167 | 167 | else: |
|
168 | 168 | prefix, label = ' ', '' |
|
169 | 169 | else: |
|
170 | 170 | if n == cur: |
|
171 | 171 | prefix, label = '*', 'bookmarks.current' |
|
172 | 172 | else: |
|
173 | 173 | prefix, label = ' ', '' |
|
174 | 174 | |
|
175 | 175 | if ui.quiet: |
|
176 | 176 | ui.write("%s\n" % bmark, label=label) |
|
177 | 177 | else: |
|
178 | 178 | ui.write(" %s %-25s %d:%s\n" % ( |
|
179 | 179 | prefix, bmark, repo.changelog.rev(n), hexfn(n)), |
|
180 | 180 | label=label) |
|
181 | 181 | return |
|
182 | 182 | |
|
183 | 183 | def _revstostrip(changelog, node): |
|
184 | 184 | srev = changelog.rev(node) |
|
185 | 185 | tostrip = [srev] |
|
186 | 186 | saveheads = [] |
|
187 | 187 | for r in xrange(srev, len(changelog)): |
|
188 | 188 | parents = changelog.parentrevs(r) |
|
189 | 189 | if parents[0] in tostrip or parents[1] in tostrip: |
|
190 | 190 | tostrip.append(r) |
|
191 | 191 | if parents[1] != nullrev: |
|
192 | 192 | for p in parents: |
|
193 | 193 | if p not in tostrip and p > srev: |
|
194 | 194 | saveheads.append(p) |
|
195 | 195 | return [r for r in tostrip if r not in saveheads] |
|
196 | 196 | |
|
197 | 197 | def strip(oldstrip, ui, repo, node, backup="all"): |
|
198 | 198 | """Strip bookmarks if revisions are stripped using |
|
199 | 199 | the mercurial.strip method. This usually happens during |
|
200 | 200 | qpush and qpop""" |
|
201 | 201 | revisions = _revstostrip(repo.changelog, node) |
|
202 | 202 | marks = repo._bookmarks |
|
203 | 203 | update = [] |
|
204 | 204 | for mark, n in marks.iteritems(): |
|
205 | 205 | if repo.changelog.rev(n) in revisions: |
|
206 | 206 | update.append(mark) |
|
207 | 207 | oldstrip(ui, repo, node, backup) |
|
208 | 208 | if len(update) > 0: |
|
209 | 209 | for m in update: |
|
210 | 210 | marks[m] = repo.changectx('.').node() |
|
211 | 211 | write(repo) |
|
212 | 212 | |
|
213 | 213 | def reposetup(ui, repo): |
|
214 | 214 | if not repo.local(): |
|
215 | 215 | return |
|
216 | 216 | |
|
217 | 217 | class bookmark_repo(repo.__class__): |
|
218 | 218 | |
|
219 | 219 | @util.propertycache |
|
220 | 220 | def _bookmarks(self): |
|
221 | 221 | '''Parse .hg/bookmarks file and return a dictionary |
|
222 | 222 | |
|
223 | 223 | Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values |
|
224 | 224 | in the .hg/bookmarks file. |
|
225 | 225 | Read the file and return a (name=>nodeid) dictionary |
|
226 | 226 | ''' |
|
227 | self._loadingbookmarks = True | |
|
227 | 228 | try: |
|
228 | 229 | bookmarks = {} |
|
229 | 230 | for line in self.opener('bookmarks'): |
|
230 | 231 | sha, refspec = line.strip().split(' ', 1) |
|
231 | 232 | bookmarks[refspec] = super(bookmark_repo, self).lookup(sha) |
|
232 | 233 | except: |
|
233 | 234 | pass |
|
235 | self._loadingbookmarks = False | |
|
234 | 236 | return bookmarks |
|
235 | 237 | |
|
236 | 238 | @util.propertycache |
|
237 | 239 | def _bookmarkcurrent(self): |
|
238 | 240 | '''Get the current bookmark |
|
239 | 241 | |
|
240 | 242 | If we use gittishsh branches we have a current bookmark that |
|
241 | 243 | we are on. This function returns the name of the bookmark. It |
|
242 | 244 | is stored in .hg/bookmarks.current |
|
243 | 245 | ''' |
|
244 | 246 | mark = None |
|
245 | 247 | if os.path.exists(self.join('bookmarks.current')): |
|
246 | 248 | file = self.opener('bookmarks.current') |
|
247 | 249 | # No readline() in posixfile_nt, reading everything is cheap |
|
248 | 250 | mark = (file.readlines() or [''])[0] |
|
249 | 251 | if mark == '': |
|
250 | 252 | mark = None |
|
251 | 253 | file.close() |
|
252 | 254 | return mark |
|
253 | 255 | |
|
254 | 256 | def rollback(self, *args): |
|
255 | 257 | if os.path.exists(self.join('undo.bookmarks')): |
|
256 | 258 | util.rename(self.join('undo.bookmarks'), self.join('bookmarks')) |
|
257 | 259 | return super(bookmark_repo, self).rollback(*args) |
|
258 | 260 | |
|
259 | 261 | def lookup(self, key): |
|
260 |
if |
|
|
261 |
key |
|
|
262 | if not getattr(self, '_loadingbookmarks', False): | |
|
263 | if key in self._bookmarks: | |
|
264 | key = self._bookmarks[key] | |
|
262 | 265 | return super(bookmark_repo, self).lookup(key) |
|
263 | 266 | |
|
264 | 267 | def _bookmarksupdate(self, parents, node): |
|
265 | 268 | marks = self._bookmarks |
|
266 | 269 | update = False |
|
267 | 270 | if ui.configbool('bookmarks', 'track.current'): |
|
268 | 271 | mark = self._bookmarkcurrent |
|
269 | 272 | if mark and marks[mark] in parents: |
|
270 | 273 | marks[mark] = node |
|
271 | 274 | update = True |
|
272 | 275 | else: |
|
273 | 276 | for mark, n in marks.items(): |
|
274 | 277 | if n in parents: |
|
275 | 278 | marks[mark] = node |
|
276 | 279 | update = True |
|
277 | 280 | if update: |
|
278 | 281 | write(self) |
|
279 | 282 | |
|
280 | 283 | def commitctx(self, ctx, error=False): |
|
281 | 284 | """Add a revision to the repository and |
|
282 | 285 | move the bookmark""" |
|
283 | 286 | wlock = self.wlock() # do both commit and bookmark with lock held |
|
284 | 287 | try: |
|
285 | 288 | node = super(bookmark_repo, self).commitctx(ctx, error) |
|
286 | 289 | if node is None: |
|
287 | 290 | return None |
|
288 | 291 | parents = self.changelog.parents(node) |
|
289 | 292 | if parents[1] == nullid: |
|
290 | 293 | parents = (parents[0],) |
|
291 | 294 | |
|
292 | 295 | self._bookmarksupdate(parents, node) |
|
293 | 296 | return node |
|
294 | 297 | finally: |
|
295 | 298 | wlock.release() |
|
296 | 299 | |
|
297 | 300 | def pull(self, remote, heads=None, force=False): |
|
298 | 301 | result = super(bookmark_repo, self).pull(remote, heads, force) |
|
299 | 302 | |
|
300 | 303 | self.ui.debug("checking for updated bookmarks\n") |
|
301 | 304 | rb = remote.listkeys('bookmarks') |
|
302 | 305 | changed = False |
|
303 | 306 | for k in rb.keys(): |
|
304 | 307 | if k in self._bookmarks: |
|
305 | 308 | nr, nl = rb[k], self._bookmarks[k] |
|
306 | 309 | if nr in self: |
|
307 | 310 | cr = self[nr] |
|
308 | 311 | cl = self[nl] |
|
309 | 312 | if cl.rev() >= cr.rev(): |
|
310 | 313 | continue |
|
311 | 314 | if cr in cl.descendants(): |
|
312 | 315 | self._bookmarks[k] = cr.node() |
|
313 | 316 | changed = True |
|
314 | 317 | self.ui.status(_("updating bookmark %s\n") % k) |
|
315 | 318 | else: |
|
316 | 319 | self.ui.warn(_("not updating divergent" |
|
317 | 320 | " bookmark %s\n") % k) |
|
318 | 321 | if changed: |
|
319 | 322 | write(repo) |
|
320 | 323 | |
|
321 | 324 | return result |
|
322 | 325 | |
|
323 | 326 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
324 | 327 | result = super(bookmark_repo, self).push(remote, force, revs, |
|
325 | 328 | newbranch) |
|
326 | 329 | |
|
327 | 330 | self.ui.debug("checking for updated bookmarks\n") |
|
328 | 331 | rb = remote.listkeys('bookmarks') |
|
329 | 332 | for k in rb.keys(): |
|
330 | 333 | if k in self._bookmarks: |
|
331 | 334 | nr, nl = rb[k], self._bookmarks[k] |
|
332 | 335 | if nr in self: |
|
333 | 336 | cr = self[nr] |
|
334 | 337 | cl = self[nl] |
|
335 | 338 | if cl in cr.descendants(): |
|
336 | 339 | r = remote.pushkey('bookmarks', k, nr, nl) |
|
337 | 340 | if r: |
|
338 | 341 | self.ui.status(_("updating bookmark %s\n") % k) |
|
339 | 342 | else: |
|
340 | 343 | self.ui.warn(_('updating bookmark %s' |
|
341 | 344 | ' failed!\n') % k) |
|
342 | 345 | |
|
343 | 346 | return result |
|
344 | 347 | |
|
345 | 348 | def addchangegroup(self, *args, **kwargs): |
|
346 | 349 | parents = self.dirstate.parents() |
|
347 | 350 | |
|
348 | 351 | result = super(bookmark_repo, self).addchangegroup(*args, **kwargs) |
|
349 | 352 | if result > 1: |
|
350 | 353 | # We have more heads than before |
|
351 | 354 | return result |
|
352 | 355 | node = self.changelog.tip() |
|
353 | 356 | |
|
354 | 357 | self._bookmarksupdate(parents, node) |
|
355 | 358 | return result |
|
356 | 359 | |
|
357 | 360 | def _findtags(self): |
|
358 | 361 | """Merge bookmarks with normal tags""" |
|
359 | 362 | (tags, tagtypes) = super(bookmark_repo, self)._findtags() |
|
360 |
|
|
|
363 | if not getattr(self, '_loadingbookmarks', False): | |
|
364 | tags.update(self._bookmarks) | |
|
361 | 365 | return (tags, tagtypes) |
|
362 | 366 | |
|
363 | 367 | if hasattr(repo, 'invalidate'): |
|
364 | 368 | def invalidate(self): |
|
365 | 369 | super(bookmark_repo, self).invalidate() |
|
366 | 370 | for attr in ('_bookmarks', '_bookmarkcurrent'): |
|
367 | 371 | if attr in self.__dict__: |
|
368 | 372 | delattr(self, attr) |
|
369 | 373 | |
|
370 | 374 | repo.__class__ = bookmark_repo |
|
371 | 375 | |
|
372 | 376 | def listbookmarks(repo): |
|
373 | 377 | # We may try to list bookmarks on a repo type that does not |
|
374 | 378 | # support it (e.g., statichttprepository). |
|
375 | 379 | if not hasattr(repo, '_bookmarks'): |
|
376 | 380 | return {} |
|
377 | 381 | |
|
378 | 382 | d = {} |
|
379 | 383 | for k, v in repo._bookmarks.iteritems(): |
|
380 | 384 | d[k] = hex(v) |
|
381 | 385 | return d |
|
382 | 386 | |
|
383 | 387 | def pushbookmark(repo, key, old, new): |
|
384 | 388 | w = repo.wlock() |
|
385 | 389 | try: |
|
386 | 390 | marks = repo._bookmarks |
|
387 | 391 | if hex(marks.get(key, '')) != old: |
|
388 | 392 | return False |
|
389 | 393 | if new == '': |
|
390 | 394 | del marks[key] |
|
391 | 395 | else: |
|
392 | 396 | if new not in repo: |
|
393 | 397 | return False |
|
394 | 398 | marks[key] = repo[new].node() |
|
395 | 399 | write(repo) |
|
396 | 400 | return True |
|
397 | 401 | finally: |
|
398 | 402 | w.release() |
|
399 | 403 | |
|
400 | 404 | def pull(oldpull, ui, repo, source="default", **opts): |
|
401 | 405 | # translate bookmark args to rev args for actual pull |
|
402 | 406 | if opts.get('bookmark'): |
|
403 | 407 | # this is an unpleasant hack as pull will do this internally |
|
404 | 408 | source, branches = hg.parseurl(ui.expandpath(source), |
|
405 | 409 | opts.get('branch')) |
|
406 | 410 | other = hg.repository(hg.remoteui(repo, opts), source) |
|
407 | 411 | rb = other.listkeys('bookmarks') |
|
408 | 412 | |
|
409 | 413 | for b in opts['bookmark']: |
|
410 | 414 | if b not in rb: |
|
411 | 415 | raise util.Abort(_('remote bookmark %s not found!') % b) |
|
412 | 416 | opts.setdefault('rev', []).append(b) |
|
413 | 417 | |
|
414 | 418 | result = oldpull(ui, repo, source, **opts) |
|
415 | 419 | |
|
416 | 420 | # update specified bookmarks |
|
417 | 421 | if opts.get('bookmark'): |
|
418 | 422 | for b in opts['bookmark']: |
|
419 | 423 | # explicit pull overrides local bookmark if any |
|
420 | 424 | ui.status(_("importing bookmark %s\n") % b) |
|
421 | 425 | repo._bookmarks[b] = repo[rb[b]].node() |
|
422 | 426 | write(repo) |
|
423 | 427 | |
|
424 | 428 | return result |
|
425 | 429 | |
|
426 | 430 | def push(oldpush, ui, repo, dest=None, **opts): |
|
427 | 431 | dopush = True |
|
428 | 432 | if opts.get('bookmark'): |
|
429 | 433 | dopush = False |
|
430 | 434 | for b in opts['bookmark']: |
|
431 | 435 | if b in repo._bookmarks: |
|
432 | 436 | dopush = True |
|
433 | 437 | opts.setdefault('rev', []).append(b) |
|
434 | 438 | |
|
435 | 439 | result = 0 |
|
436 | 440 | if dopush: |
|
437 | 441 | result = oldpush(ui, repo, dest, **opts) |
|
438 | 442 | |
|
439 | 443 | if opts.get('bookmark'): |
|
440 | 444 | # this is an unpleasant hack as push will do this internally |
|
441 | 445 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
442 | 446 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
443 | 447 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
444 | 448 | rb = other.listkeys('bookmarks') |
|
445 | 449 | for b in opts['bookmark']: |
|
446 | 450 | # explicit push overrides remote bookmark if any |
|
447 | 451 | if b in repo._bookmarks: |
|
448 | 452 | ui.status(_("exporting bookmark %s\n") % b) |
|
449 | 453 | new = repo[b].hex() |
|
450 | 454 | elif b in rb: |
|
451 | 455 | ui.status(_("deleting remote bookmark %s\n") % b) |
|
452 | 456 | new = '' # delete |
|
453 | 457 | else: |
|
454 | 458 | ui.warn(_('bookmark %s does not exist on the local ' |
|
455 | 459 | 'or remote repository!\n') % b) |
|
456 | 460 | return 2 |
|
457 | 461 | old = rb.get(b, '') |
|
458 | 462 | r = other.pushkey('bookmarks', b, old, new) |
|
459 | 463 | if not r: |
|
460 | 464 | ui.warn(_('updating bookmark %s failed!\n') % b) |
|
461 | 465 | if not result: |
|
462 | 466 | result = 2 |
|
463 | 467 | |
|
464 | 468 | return result |
|
465 | 469 | |
|
466 | 470 | def diffbookmarks(ui, repo, remote): |
|
467 | 471 | ui.status(_("searching for changes\n")) |
|
468 | 472 | |
|
469 | 473 | lmarks = repo.listkeys('bookmarks') |
|
470 | 474 | rmarks = remote.listkeys('bookmarks') |
|
471 | 475 | |
|
472 | 476 | diff = sorted(set(rmarks) - set(lmarks)) |
|
473 | 477 | for k in diff: |
|
474 | 478 | ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) |
|
475 | 479 | |
|
476 | 480 | if len(diff) <= 0: |
|
477 | 481 | ui.status(_("no changes found\n")) |
|
478 | 482 | return 1 |
|
479 | 483 | return 0 |
|
480 | 484 | |
|
481 | 485 | def incoming(oldincoming, ui, repo, source="default", **opts): |
|
482 | 486 | if opts.get('bookmarks'): |
|
483 | 487 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
484 | 488 | other = hg.repository(hg.remoteui(repo, opts), source) |
|
485 | 489 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
486 | 490 | return diffbookmarks(ui, repo, other) |
|
487 | 491 | else: |
|
488 | 492 | return oldincoming(ui, repo, source, **opts) |
|
489 | 493 | |
|
490 | 494 | def outgoing(oldoutgoing, ui, repo, dest=None, **opts): |
|
491 | 495 | if opts.get('bookmarks'): |
|
492 | 496 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
493 | 497 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
494 | 498 | other = hg.repository(hg.remoteui(repo, opts), dest) |
|
495 | 499 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
496 | 500 | return diffbookmarks(ui, other, repo) |
|
497 | 501 | else: |
|
498 | 502 | return oldoutgoing(ui, repo, dest, **opts) |
|
499 | 503 | |
|
500 | 504 | def uisetup(ui): |
|
501 | 505 | extensions.wrapfunction(repair, "strip", strip) |
|
502 | 506 | if ui.configbool('bookmarks', 'track.current'): |
|
503 | 507 | extensions.wrapcommand(commands.table, 'update', updatecurbookmark) |
|
504 | 508 | |
|
505 | 509 | entry = extensions.wrapcommand(commands.table, 'pull', pull) |
|
506 | 510 | entry[1].append(('B', 'bookmark', [], |
|
507 | 511 | _("bookmark to import"), |
|
508 | 512 | _('BOOKMARK'))) |
|
509 | 513 | entry = extensions.wrapcommand(commands.table, 'push', push) |
|
510 | 514 | entry[1].append(('B', 'bookmark', [], |
|
511 | 515 | _("bookmark to export"), |
|
512 | 516 | _('BOOKMARK'))) |
|
513 | 517 | entry = extensions.wrapcommand(commands.table, 'incoming', incoming) |
|
514 | 518 | entry[1].append(('B', 'bookmarks', False, |
|
515 | 519 | _("compare bookmark"))) |
|
516 | 520 | entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing) |
|
517 | 521 | entry[1].append(('B', 'bookmarks', False, |
|
518 | 522 | _("compare bookmark"))) |
|
519 | 523 | |
|
520 | 524 | pushkey.register('bookmarks', pushbookmark, listbookmarks) |
|
521 | 525 | |
|
522 | 526 | def updatecurbookmark(orig, ui, repo, *args, **opts): |
|
523 | 527 | '''Set the current bookmark |
|
524 | 528 | |
|
525 | 529 | If the user updates to a bookmark we update the .hg/bookmarks.current |
|
526 | 530 | file. |
|
527 | 531 | ''' |
|
528 | 532 | res = orig(ui, repo, *args, **opts) |
|
529 | 533 | rev = opts['rev'] |
|
530 | 534 | if not rev and len(args) > 0: |
|
531 | 535 | rev = args[0] |
|
532 | 536 | setcurrent(repo, rev) |
|
533 | 537 | return res |
|
534 | 538 | |
|
535 | 539 | cmdtable = { |
|
536 | 540 | "bookmarks": |
|
537 | 541 | (bookmark, |
|
538 | 542 | [('f', 'force', False, _('force')), |
|
539 | 543 | ('r', 'rev', '', _('revision'), _('REV')), |
|
540 | 544 | ('d', 'delete', False, _('delete a given bookmark')), |
|
541 | 545 | ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))], |
|
542 | 546 | _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')), |
|
543 | 547 | } |
|
544 | 548 | |
|
545 | 549 | colortable = {'bookmarks.current': 'green'} |
@@ -1,173 +1,188 b'' | |||
|
1 | 1 | # darcs.py - darcs support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from common import NoRepo, checktool, commandline, commit, converter_source |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | 10 | from mercurial import util |
|
11 | import os, shutil, tempfile | |
|
11 | import os, shutil, tempfile, re | |
|
12 | 12 | |
|
13 | 13 | # The naming drift of ElementTree is fun! |
|
14 | 14 | |
|
15 | 15 | try: |
|
16 | 16 | from xml.etree.cElementTree import ElementTree |
|
17 | 17 | except ImportError: |
|
18 | 18 | try: |
|
19 | 19 | from xml.etree.ElementTree import ElementTree |
|
20 | 20 | except ImportError: |
|
21 | 21 | try: |
|
22 | 22 | from elementtree.cElementTree import ElementTree |
|
23 | 23 | except ImportError: |
|
24 | 24 | try: |
|
25 | 25 | from elementtree.ElementTree import ElementTree |
|
26 | 26 | except ImportError: |
|
27 | 27 | ElementTree = None |
|
28 | 28 | |
|
29 | 29 | class darcs_source(converter_source, commandline): |
|
30 | 30 | def __init__(self, ui, path, rev=None): |
|
31 | 31 | converter_source.__init__(self, ui, path, rev=rev) |
|
32 | 32 | commandline.__init__(self, ui, 'darcs') |
|
33 | 33 | |
|
34 |
# check for _darcs, ElementTree |
|
|
35 |
# |
|
|
36 | if not os.path.exists(os.path.join(path, '_darcs', 'inventories')): | |
|
37 | raise NoRepo(_("%s does not look like a darcs repository") % path) | |
|
38 | ||
|
34 | # check for _darcs, ElementTree so that we can easily skip | |
|
35 | # test-convert-darcs if ElementTree is not around | |
|
39 | 36 | if not os.path.exists(os.path.join(path, '_darcs')): |
|
40 | 37 | raise NoRepo(_("%s does not look like a darcs repository") % path) |
|
41 | 38 | |
|
42 | 39 | checktool('darcs') |
|
43 | 40 | version = self.run0('--version').splitlines()[0].strip() |
|
44 | 41 | if version < '2.1': |
|
45 | 42 | raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') % |
|
46 | 43 | version) |
|
47 | 44 | |
|
48 | 45 | if ElementTree is None: |
|
49 | 46 | raise util.Abort(_("Python ElementTree module is not available")) |
|
50 | 47 | |
|
51 | 48 | self.path = os.path.realpath(path) |
|
52 | 49 | |
|
53 | 50 | self.lastrev = None |
|
54 | 51 | self.changes = {} |
|
55 | 52 | self.parents = {} |
|
56 | 53 | self.tags = {} |
|
57 | 54 | |
|
55 | # Check darcs repository format | |
|
56 | format = self.format() | |
|
57 | if format: | |
|
58 | if format in ('darcs-1.0', 'hashed'): | |
|
59 | raise NoRepo(_("%s repository format is unsupported, " | |
|
60 | "please upgrade") % format) | |
|
61 | else: | |
|
62 | self.ui.warn(_('failed to detect repository format!')) | |
|
63 | ||
|
58 | 64 | def before(self): |
|
59 | 65 | self.tmppath = tempfile.mkdtemp( |
|
60 | 66 | prefix='convert-' + os.path.basename(self.path) + '-') |
|
61 | 67 | output, status = self.run('init', repodir=self.tmppath) |
|
62 | 68 | self.checkexit(status) |
|
63 | 69 | |
|
64 | 70 | tree = self.xml('changes', xml_output=True, summary=True, |
|
65 | 71 | repodir=self.path) |
|
66 | 72 | tagname = None |
|
67 | 73 | child = None |
|
68 | 74 | for elt in tree.findall('patch'): |
|
69 | 75 | node = elt.get('hash') |
|
70 | 76 | name = elt.findtext('name', '') |
|
71 | 77 | if name.startswith('TAG '): |
|
72 | 78 | tagname = name[4:].strip() |
|
73 | 79 | elif tagname is not None: |
|
74 | 80 | self.tags[tagname] = node |
|
75 | 81 | tagname = None |
|
76 | 82 | self.changes[node] = elt |
|
77 | 83 | self.parents[child] = [node] |
|
78 | 84 | child = node |
|
79 | 85 | self.parents[child] = [] |
|
80 | 86 | |
|
81 | 87 | def after(self): |
|
82 | 88 | self.ui.debug('cleaning up %s\n' % self.tmppath) |
|
83 | 89 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
84 | 90 | |
|
85 | 91 | def xml(self, cmd, **kwargs): |
|
86 | 92 | # NOTE: darcs is currently encoding agnostic and will print |
|
87 | 93 | # patch metadata byte-for-byte, even in the XML changelog. |
|
88 | 94 | etree = ElementTree() |
|
89 | 95 | fp = self._run(cmd, **kwargs) |
|
90 | 96 | etree.parse(fp) |
|
91 | 97 | self.checkexit(fp.close()) |
|
92 | 98 | return etree.getroot() |
|
93 | 99 | |
|
100 | def format(self): | |
|
101 | output, status = self.run('show', 'repo', no_files=True, | |
|
102 | repodir=self.path) | |
|
103 | self.checkexit(status) | |
|
104 | m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE) | |
|
105 | if not m: | |
|
106 | return None | |
|
107 | return ','.join(sorted(f.strip() for f in m.group(1).split(','))) | |
|
108 | ||
|
94 | 109 | def manifest(self): |
|
95 | 110 | man = [] |
|
96 | 111 | output, status = self.run('show', 'files', no_directories=True, |
|
97 | 112 | repodir=self.tmppath) |
|
98 | 113 | self.checkexit(status) |
|
99 | 114 | for line in output.split('\n'): |
|
100 | 115 | path = line[2:] |
|
101 | 116 | if path: |
|
102 | 117 | man.append(path) |
|
103 | 118 | return man |
|
104 | 119 | |
|
105 | 120 | def getheads(self): |
|
106 | 121 | return self.parents[None] |
|
107 | 122 | |
|
108 | 123 | def getcommit(self, rev): |
|
109 | 124 | elt = self.changes[rev] |
|
110 | 125 | date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y') |
|
111 | 126 | desc = elt.findtext('name') + '\n' + elt.findtext('comment', '') |
|
112 | 127 | # etree can return unicode objects for name, comment, and author, |
|
113 | 128 | # so recode() is used to ensure str objects are emitted. |
|
114 | 129 | return commit(author=self.recode(elt.get('author')), |
|
115 | 130 | date=util.datestr(date), |
|
116 | 131 | desc=self.recode(desc).strip(), |
|
117 | 132 | parents=self.parents[rev]) |
|
118 | 133 | |
|
119 | 134 | def pull(self, rev): |
|
120 | 135 | output, status = self.run('pull', self.path, all=True, |
|
121 | 136 | match='hash %s' % rev, |
|
122 | 137 | no_test=True, no_posthook=True, |
|
123 | 138 | external_merge='/bin/false', |
|
124 | 139 | repodir=self.tmppath) |
|
125 | 140 | if status: |
|
126 | 141 | if output.find('We have conflicts in') == -1: |
|
127 | 142 | self.checkexit(status, output) |
|
128 | 143 | output, status = self.run('revert', all=True, repodir=self.tmppath) |
|
129 | 144 | self.checkexit(status, output) |
|
130 | 145 | |
|
131 | 146 | def getchanges(self, rev): |
|
132 | 147 | copies = {} |
|
133 | 148 | changes = [] |
|
134 | 149 | man = None |
|
135 | 150 | for elt in self.changes[rev].find('summary').getchildren(): |
|
136 | 151 | if elt.tag in ('add_directory', 'remove_directory'): |
|
137 | 152 | continue |
|
138 | 153 | if elt.tag == 'move': |
|
139 | 154 | if man is None: |
|
140 | 155 | man = self.manifest() |
|
141 | 156 | source, dest = elt.get('from'), elt.get('to') |
|
142 | 157 | if source in man: |
|
143 | 158 | # File move |
|
144 | 159 | changes.append((source, rev)) |
|
145 | 160 | changes.append((dest, rev)) |
|
146 | 161 | copies[dest] = source |
|
147 | 162 | else: |
|
148 | 163 | # Directory move, deduce file moves from manifest |
|
149 | 164 | source = source + '/' |
|
150 | 165 | for f in man: |
|
151 | 166 | if not f.startswith(source): |
|
152 | 167 | continue |
|
153 | 168 | fdest = dest + '/' + f[len(source):] |
|
154 | 169 | changes.append((f, rev)) |
|
155 | 170 | changes.append((fdest, rev)) |
|
156 | 171 | copies[fdest] = f |
|
157 | 172 | else: |
|
158 | 173 | changes.append((elt.text.strip(), rev)) |
|
159 | 174 | self.pull(rev) |
|
160 | 175 | self.lastrev = rev |
|
161 | 176 | return sorted(changes), copies |
|
162 | 177 | |
|
163 | 178 | def getfile(self, name, rev): |
|
164 | 179 | if rev != self.lastrev: |
|
165 | 180 | raise util.Abort(_('internal calling inconsistency')) |
|
166 | 181 | path = os.path.join(self.tmppath, name) |
|
167 | 182 | data = open(path, 'rb').read() |
|
168 | 183 | mode = os.lstat(path).st_mode |
|
169 | 184 | mode = (mode & 0111) and 'x' or '' |
|
170 | 185 | return data, mode |
|
171 | 186 | |
|
172 | 187 | def gettags(self): |
|
173 | 188 | return self.tags |
@@ -1,677 +1,674 b'' | |||
|
1 | 1 | # url.py - HTTP handling for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> |
|
5 | 5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
6 | 6 | # |
|
7 | 7 | # This software may be used and distributed according to the terms of the |
|
8 | 8 | # GNU General Public License version 2 or any later version. |
|
9 | 9 | |
|
10 | 10 | import urllib, urllib2, urlparse, httplib, os, re, socket, cStringIO |
|
11 | 11 | import __builtin__ |
|
12 | 12 | from i18n import _ |
|
13 | 13 | import keepalive, util |
|
14 | 14 | |
|
15 | 15 | def _urlunparse(scheme, netloc, path, params, query, fragment, url): |
|
16 | 16 | '''Handle cases where urlunparse(urlparse(x://)) doesn't preserve the "//"''' |
|
17 | 17 | result = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) |
|
18 | 18 | if (scheme and |
|
19 | 19 | result.startswith(scheme + ':') and |
|
20 | 20 | not result.startswith(scheme + '://') and |
|
21 | 21 | url.startswith(scheme + '://') |
|
22 | 22 | ): |
|
23 | 23 | result = scheme + '://' + result[len(scheme + ':'):] |
|
24 | 24 | return result |
|
25 | 25 | |
|
def hidepassword(url):
    '''hide user credential in a url string'''
    parts = urlparse.urlparse(url)
    scheme, netloc, path, params, query, fragment = parts
    # replace the password portion of "user:passwd@host" with "***"
    masked = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
    return _urlunparse(scheme, masked, path, params, query, fragment, url)
|
31 | 31 | |
|
def removeauth(url):
    '''remove all authentication information from a url string'''
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    # find('@') is -1 when there is no credential part, in which case
    # the slice below leaves netloc untouched
    at = netloc.find('@')
    netloc = netloc[at + 1:]
    return _urlunparse(scheme, netloc, path, params, query, fragment, url)
|
37 | 37 | |
|
38 | 38 | def netlocsplit(netloc): |
|
39 | 39 | '''split [user[:passwd]@]host[:port] into 4-tuple.''' |
|
40 | 40 | |
|
41 | 41 | a = netloc.find('@') |
|
42 | 42 | if a == -1: |
|
43 | 43 | user, passwd = None, None |
|
44 | 44 | else: |
|
45 | 45 | userpass, netloc = netloc[:a], netloc[a + 1:] |
|
46 | 46 | c = userpass.find(':') |
|
47 | 47 | if c == -1: |
|
48 | 48 | user, passwd = urllib.unquote(userpass), None |
|
49 | 49 | else: |
|
50 | 50 | user = urllib.unquote(userpass[:c]) |
|
51 | 51 | passwd = urllib.unquote(userpass[c + 1:]) |
|
52 | 52 | c = netloc.find(':') |
|
53 | 53 | if c == -1: |
|
54 | 54 | host, port = netloc, None |
|
55 | 55 | else: |
|
56 | 56 | host, port = netloc[:c], netloc[c + 1:] |
|
57 | 57 | return host, port, user, passwd |
|
58 | 58 | |
|
59 | 59 | def netlocunsplit(host, port, user=None, passwd=None): |
|
60 | 60 | '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' |
|
61 | 61 | if port: |
|
62 | 62 | hostport = host + ':' + port |
|
63 | 63 | else: |
|
64 | 64 | hostport = host |
|
65 | 65 | if user: |
|
66 | 66 | quote = lambda s: urllib.quote(s, safe='') |
|
67 | 67 | if passwd: |
|
68 | 68 | userpass = quote(user) + ':' + quote(passwd) |
|
69 | 69 | else: |
|
70 | 70 | userpass = quote(user) |
|
71 | 71 | return userpass + '@' + hostport |
|
72 | 72 | return hostport |
|
73 | 73 | |
|
_safe = ('abcdefghijklmnopqrstuvwxyz'
         'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
         '0123456789' '_.-/')
# the two sets below are built lazily on first call to quotepath()
_safeset = None
_hex = None
def quotepath(path):
    '''quote the path part of a URL

    This is similar to urllib.quote, but it also tries to avoid
    quoting things twice (inspired by wget):

    >>> quotepath('abc def')
    'abc%20def'
    >>> quotepath('abc%20def')
    'abc%20def'
    >>> quotepath('abc%20 def')
    'abc%20%20def'
    >>> quotepath('abc def%20')
    'abc%20def%20'
    >>> quotepath('abc def%2')
    'abc%20def%252'
    >>> quotepath('abc def%')
    'abc%20def%25'
    '''
    global _safeset, _hex
    if _safeset is None:
        _safeset = set(_safe)
        _hex = set('abcdefABCDEF0123456789')
    chars = list(path)
    n = len(chars)
    for pos, ch in enumerate(chars):
        # a '%' followed by two hex digits is already an escape: keep it
        if (ch == '%' and pos + 2 < n and
            chars[pos + 1] in _hex and chars[pos + 2] in _hex):
            continue
        if ch not in _safeset:
            chars[pos] = '%%%02X' % ord(ch)
    return ''.join(chars)
|
111 | 111 | |
|
class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
    """Password manager falling back to [auth] config, then prompting.

    find_user_password tries, in order: credentials already registered
    with the base manager, the [auth] configuration section, and an
    interactive prompt (aborting when the ui is non-interactive).
    """
    def __init__(self, ui):
        urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
        self.ui = ui

    def find_user_password(self, realm, authuri):
        authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
            self, realm, authuri)
        user, passwd = authinfo
        if user and passwd:
            self._writedebug(user, passwd)
            return (user, passwd)

        if not user:
            # nothing stored: try the [auth] section of the config
            auth = self.readauthtoken(authuri)
            if auth:
                user, passwd = auth.get('username'), auth.get('password')
        if not user or not passwd:
            if not self.ui.interactive():
                raise util.Abort(_('http authorization required'))

            self.ui.write(_("http authorization required\n"))
            self.ui.status(_("realm: %s\n") % realm)
            if user:
                self.ui.status(_("user: %s\n") % user)
            else:
                user = self.ui.prompt(_("user:"), default=None)

            if not passwd:
                passwd = self.ui.getpass()

        # remember the result so the next request for this realm/uri
        # does not go through the fallback chain again
        self.add_password(realm, authuri, user, passwd)
        self._writedebug(user, passwd)
        return (user, passwd)

    def _writedebug(self, user, passwd):
        # never echo the password itself, only asterisks of equal length
        msg = _('http auth: user %s, password %s\n')
        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))

    def readauthtoken(self, uri):
        """Return the best-matching [auth] config group for ``uri``.

        [auth] entries are keyed "group.setting"; the group whose
        ``prefix`` is the longest match for the uri (with a matching
        scheme) wins.  Returns None when nothing matches.
        """
        # Read configuration
        config = dict()
        for key, val in self.ui.configitems('auth'):
            if '.' not in key:
                self.ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
                continue
            group, setting = key.split('.', 1)
            gdict = config.setdefault(group, dict())
            if setting in ('username', 'cert', 'key'):
                # these settings may contain "~" or environment variables
                val = util.expandpath(val)
            gdict[setting] = val

        # Find the best match
        scheme, hostpath = uri.split('://', 1)
        bestlen = 0
        bestauth = None
        for auth in config.itervalues():
            prefix = auth.get('prefix')
            if not prefix:
                continue
            p = prefix.split('://', 1)
            if len(p) > 1:
                # prefix carries its own scheme, which constrains the match
                schemes, prefix = [p[0]], p[1]
            else:
                schemes = (auth.get('schemes') or 'https').split()
            if (prefix == '*' or hostpath.startswith(prefix)) and \
                len(prefix) > bestlen and scheme in schemes:
                bestlen = len(prefix)
                bestauth = auth
        return bestauth
|
182 | 182 | |
|
class proxyhandler(urllib2.ProxyHandler):
    """ProxyHandler honoring Mercurial's [http_proxy] config section."""
    def __init__(self, ui):
        # configuration takes precedence over the environment variable
        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None

        if proxyurl:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            snpqf = urlparse.urlsplit(proxyurl)
            proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
            hpup = netlocsplit(proxynetloc)

            proxyhost, proxyport, proxyuser, proxypasswd = hpup
            if not proxyuser:
                proxyuser = ui.config("http_proxy", "user")
                proxypasswd = ui.config("http_proxy", "passwd")

            # see if we should use a proxy for this url
            no_list = ["localhost", "127.0.0.1"]
            no_list.extend([p.lower() for
                            p in ui.configlist("http_proxy", "no")])
            no_list.extend([p.strip().lower() for
                            p in os.getenv("no_proxy", '').split(',')
                            if p.strip()])
            # "http_proxy.always" config is for running tests on localhost
            if ui.configbool("http_proxy", "always"):
                self.no_list = []
            else:
                self.no_list = no_list

            # rebuild the proxy url with credentials embedded in the netloc
            proxyurl = urlparse.urlunsplit((
                proxyscheme, netlocunsplit(proxyhost, proxyport,
                                           proxyuser, proxypasswd or ''),
                proxypath, proxyquery, proxyfrag))
            proxies = {'http': proxyurl, 'https': proxyurl}
            ui.debug('proxying through http://%s:%s\n' %
                     (proxyhost, proxyport))
        else:
            proxies = {}

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found, so drop them
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if env in os.environ:
                    del os.environ[env]
            except OSError:
                pass

        urllib2.ProxyHandler.__init__(self, proxies)
        self.ui = ui

    def proxy_open(self, req, proxy, type_):
        # NOTE(review): self.no_list is only assigned when a proxy is
        # configured; presumably urllib2 never calls proxy_open with an
        # empty proxies map -- confirm before refactoring
        host = req.get_host().split(':')[0]
        if host in self.no_list:
            return None

        # work around a bug in Python < 2.4.2
        # (it leaves a "\n" at the end of Proxy-authorization headers)
        baseclass = req.__class__
        class _request(baseclass):
            def add_header(self, key, val):
                if key.lower() == 'proxy-authorization':
                    val = val.strip()
                return baseclass.add_header(self, key, val)
        req.__class__ = _request

        return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
|
253 | 253 | |
|
254 | 254 | class httpsendfile(object): |
|
255 | 255 | """This is a wrapper around the objects returned by python's "open". |
|
256 | 256 | |
|
257 | 257 | Its purpose is to send file-like objects via HTTP and, to do so, it |
|
258 | 258 | defines a __len__ attribute to feed the Content-Length header. |
|
259 | 259 | """ |
|
260 | 260 | |
|
261 | 261 | def __init__(self, *args, **kwargs): |
|
262 | 262 | # We can't just "self._data = open(*args, **kwargs)" here because there |
|
263 | 263 | # is an "open" function defined in this module that shadows the global |
|
264 | 264 | # one |
|
265 | 265 | self._data = __builtin__.open(*args, **kwargs) |
|
266 | 266 | self.read = self._data.read |
|
267 | 267 | self.seek = self._data.seek |
|
268 | 268 | self.close = self._data.close |
|
269 | 269 | self.write = self._data.write |
|
270 | 270 | |
|
271 | 271 | def __len__(self): |
|
272 | 272 | return os.fstat(self._data.fileno()).st_size |
|
273 | 273 | |
|
def _gen_sendfile(connection):
    """Return a send() suitable for ``connection`` that streams
    httpsendfile objects chunk by chunk instead of in one piece."""
    def _sendfile(self, data):
        if not isinstance(data, httpsendfile):
            connection.send(self, data)
            return
        # if auth required, some data sent twice, so rewind here
        data.seek(0)
        for chunk in util.filechunkiter(data):
            connection.send(self, chunk)
    return _sendfile
|
285 | 285 | |
|
# HTTPS support is optional: urllib2 only exposes HTTPSHandler when
# Python was built with SSL support
has_https = hasattr(urllib2, 'HTTPSHandler')
if has_https:
    try:
        # avoid using deprecated/broken FakeSocket in python 2.6
        import ssl
        _ssl_wrap_socket = ssl.wrap_socket
        CERT_REQUIRED = ssl.CERT_REQUIRED
    except ImportError:
        # Python < 2.6: emulate the ssl module interface on top of the
        # legacy socket.ssl()/FakeSocket API; certificate verification
        # is not possible there
        CERT_REQUIRED = 2

        def _ssl_wrap_socket(sock, key_file, cert_file,
                             cert_reqs=CERT_REQUIRED, ca_certs=None):
            if ca_certs:
                raise util.Abort(_(
                    'certificate checking requires Python 2.6'))

            ssl = socket.ssl(sock, key_file, cert_file)
            return httplib.FakeSocket(sock, ssl)
|
try:
    # Python >= 2.6 ships a timeout-aware connect helper
    _create_connection = socket.create_connection
except AttributeError:
    # sentinel meaning "use the socket module's default timeout"
    _GLOBAL_DEFAULT_TIMEOUT = object()

    def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                           source_address=None):
        # lifted from Python 2.6

        msg = "getaddrinfo returns an empty list"
        host, port = address
        # try every resolved address until one of them connects
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                    sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock

            except socket.error, msg:
                if sock is not None:
                    sock.close()

        # all candidates failed: re-raise the last error seen
        raise socket.error, msg
|
333 | 333 | |
|
class httpconnection(keepalive.HTTPConnection):
    """Keepalive HTTP connection that can tunnel HTTPS through a proxy."""
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection)

    def connect(self):
        # self.realhostport is filled in by _generic_start_transaction
        # when a CONNECT tunnel to an HTTPS target is needed
        if has_https and self.realhostport: # use CONNECT proxy
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((self.host, self.port))
            if _generic_proxytunnel(self):
                # we do not support client x509 certificates
                self.sock = _ssl_wrap_socket(self.sock, None, None)
        else:
            keepalive.HTTPConnection.connect(self)

    def getresponse(self):
        # hand back a response stashed by _generic_proxytunnel, if any
        proxyres = getattr(self, 'proxyres', None)
        if proxyres:
            if proxyres.will_close:
                # this response ends the connection: close it now
                self.close()
            self.proxyres = None
            return proxyres
        return keepalive.HTTPConnection.getresponse(self)
|
356 | 356 | |
|
# general transaction handler to support different ways to handle
# HTTPS proxying before and after Python 2.6.3.
def _generic_start_transaction(handler, h, req):
    """Record CONNECT-tunnel state (realhostport, headers) on ``h``.

    Sets h.realhostport to "host:port" when the request must be
    tunneled through a proxy via CONNECT, and h.headers to the headers
    to forward; otherwise both are set to None.
    """
    # Python >= 2.6.3 records the tunnel target on the request object
    if hasattr(req, '_tunnel_host') and req._tunnel_host:
        tunnel_host = req._tunnel_host
        if tunnel_host[:7] not in ['http://', 'https:/']:
            tunnel_host = 'https://' + tunnel_host
        new_tunnel = True
    else:
        # older Pythons: the selector holds the full url when proxied
        tunnel_host = req.get_selector()
        new_tunnel = False

    if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
        urlparts = urlparse.urlparse(tunnel_host)
        if new_tunnel or urlparts[0] == 'https': # only use CONNECT for HTTPS
            realhostport = urlparts[1]
            if realhostport[-1] == ']' or ':' not in realhostport:
                # no explicit port (or bare IPv6 literal): default to 443
                realhostport += ':443'

            h.realhostport = realhostport
            h.headers = req.headers.copy()
            h.headers.update(handler.parent.addheaders)
            return

    h.realhostport = None
    h.headers = None
|
def _generic_proxytunnel(self):
    """Issue a CONNECT request to the proxy over self.sock.

    Returns True when the proxy answered 200 (callers then wrap
    self.sock with SSL).  On any other status the parsed proxy
    response is stored in self.proxyres -- so getresponse() can return
    it to the caller -- and False is returned.
    """
    # forward only the Proxy-* headers to the proxy itself
    proxyheaders = dict(
            [(x, self.headers[x]) for x in self.headers
             if x.lower().startswith('proxy-')])
    self._set_hostport(self.host, self.port)
    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
    for header in proxyheaders.iteritems():
        self.send('%s: %s\r\n' % header)
    self.send('\r\n')

    # majority of the following code is duplicated from
    # httplib.HTTPConnection as there are no adequate places to
    # override functions to provide the needed functionality
    res = self.response_class(self.sock,
                              strict=self.strict,
                              method=self._method)

    while True:
        version, status, reason = res._read_status()
        if status != httplib.CONTINUE:
            break
        # skip the header block of a 100 Continue interim response
        while True:
            skip = res.fp.readline().strip()
            if not skip:
                break
    res.status = status
    res.reason = reason.strip()

    if res.status == 200:
        # tunnel established: drain the remaining header lines
        while True:
            line = res.fp.readline()
            if line == '\r\n':
                break
        return True

    if version == 'HTTP/1.0':
        res.version = 10
    elif version.startswith('HTTP/1.'):
        res.version = 11
    elif version == 'HTTP/0.9':
        res.version = 9
    else:
        raise httplib.UnknownProtocol(version)

    if res.version == 9:
        # HTTP/0.9 has no headers; the body runs until the socket closes
        res.length = None
        res.chunked = 0
        res.will_close = 1
        res.msg = httplib.HTTPMessage(cStringIO.StringIO())
        return False

    res.msg = httplib.HTTPMessage(res.fp)
    res.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    trenc = res.msg.getheader('transfer-encoding')
    if trenc and trenc.lower() == "chunked":
        res.chunked = 1
        res.chunk_left = None
    else:
        res.chunked = 0

    # will the connection close at the end of the response?
    res.will_close = res._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
    length = res.msg.getheader('content-length')
    if length and not res.chunked:
        try:
            res.length = int(length)
        except ValueError:
            res.length = None
        else:
            if res.length < 0: # ignore nonsensical negative lengths
                res.length = None
    else:
        res.length = None

    # does the body have a fixed length? (of zero)
    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
        100 <= status < 200 or # 1xx codes
        res._method == 'HEAD'):
        res.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not res.will_close and
        not res.chunked and
        res.length is None):
        res.will_close = 1

    self.proxyres = res

    return False
|
480 | 480 | |
|
class httphandler(keepalive.HTTPHandler):
    """Keepalive-aware HTTP handler backed by httpconnection."""
    def http_open(self, req):
        return self.do_open(httpconnection, req)

    def _start_transaction(self, h, req):
        # record CONNECT-proxy state before starting the transaction
        _generic_start_transaction(self, h, req)
        return keepalive.HTTPHandler._start_transaction(self, h, req)

    def __del__(self):
        # make sure pooled keepalive connections get shut down
        self.close_all()
|
491 | ||
|
if has_https:
    class BetterHTTPS(httplib.HTTPSConnection):
        """HTTPSConnection that can verify the peer against web.cacerts."""
        send = keepalive.safesend

        def connect(self):
            # self.ui is attached by httpshandler._makeconnection
            if hasattr(self, 'ui'):
                cacerts = self.ui.config('web', 'cacerts')
            else:
                cacerts = None

            if cacerts:
                # verify the server certificate against the CA bundle
                sock = _create_connection((self.host, self.port))
                self.sock = _ssl_wrap_socket(sock, self.key_file,
                        self.cert_file, cert_reqs=CERT_REQUIRED,
                        ca_certs=cacerts)
                self.ui.debug(_('server identity verification succeeded\n'))
            else:
                httplib.HTTPSConnection.connect(self)

    class httpsconnection(BetterHTTPS):
        """Keepalive HTTPS connection with CONNECT-proxy support."""
        response_class = keepalive.HTTPResponse
        # must be able to send big bundle as stream.
        send = _gen_sendfile(BetterHTTPS)
        getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)

        def connect(self):
            if self.realhostport: # use CONNECT proxy
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.connect((self.host, self.port))
                if _generic_proxytunnel(self):
                    self.sock = _ssl_wrap_socket(self.sock, self.cert_file,
                            self.key_file)
            else:
                BetterHTTPS.connect(self)

    class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
        """Keepalive HTTPS handler wiring in [auth] key/cert settings."""
        def __init__(self, ui):
            keepalive.KeepAliveHandler.__init__(self)
            urllib2.HTTPSHandler.__init__(self)
            self.ui = ui
            self.pwmgr = passwordmgr(self.ui)

        def _start_transaction(self, h, req):
            # record CONNECT-proxy state before starting the transaction
            _generic_start_transaction(self, h, req)
            return keepalive.KeepAliveHandler._start_transaction(self, h, req)

        def https_open(self, req):
            # look up [auth] settings for this url before opening
            self.auth = self.pwmgr.readauthtoken(req.get_full_url())
            return self.do_open(self._makeconnection, req)

        def _makeconnection(self, host, port=None, *args, **kwargs):
            keyfile = None
            certfile = None

            if len(args) >= 1: # key_file
                keyfile = args[0]
            if len(args) >= 2: # cert_file
                certfile = args[1]
            args = args[2:]

            # if the user has specified different key/cert files in
            # hgrc, we prefer these
            if self.auth and 'key' in self.auth and 'cert' in self.auth:
                keyfile = self.auth['key']
                certfile = self.auth['cert']

            conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
            conn.ui = self.ui
            return conn
|
561 | 558 | |
|
class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
    """Digest auth handler whose retry counter resets once per request."""
    def __init__(self, *args, **kwargs):
        urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
        # last request seen, used to detect when a new request starts
        self.retried_req = None

    def reset_retry_count(self):
        # Python 2.6.5 will call this on 401 or 407 errors and thus loop
        # forever. We disable reset_retry_count completely and reset in
        # http_error_auth_reqed instead.
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        # Reset the retry counter once for each request.
        if req is not self.retried_req:
            self.retried_req = req
            self.retried = 0
        # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
        # it doesn't know about the auth type requested. This can happen if
        # somebody is using BasicAuth and types a bad password.
        try:
            return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
                        self, auth_header, host, req, headers)
        except ValueError, inst:
            arg = inst.args[0]
            if arg.startswith("AbstractDigestAuthHandler doesn't know "):
                return
            raise
|
589 | 586 | |
|
class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
    """Basic auth handler whose retry counter resets once per request."""
    def __init__(self, *args, **kwargs):
        urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
        # last request seen, used to detect when a new request starts
        self.retried_req = None

    def reset_retry_count(self):
        # Python 2.6.5 will call this on 401 or 407 errors and thus loop
        # forever. We disable reset_retry_count completely and reset in
        # http_error_auth_reqed instead.
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        # Reset the retry counter once for each request.
        if req is not self.retried_req:
            self.retried_req = req
            self.retried = 0
        return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
                        self, auth_header, host, req, headers)
|
608 | 605 | |
|
def getauthinfo(path):
    """Split ``path`` into a credential-free url plus an authinfo tuple.

    Returns (url, authinfo).  ``authinfo`` is None when the url carried
    no user name, otherwise a tuple suitable for
    passwordmgr.add_password().
    """
    scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
    urlpath = urlpath or '/'
    if scheme != 'file':
        # XXX: why are we quoting the path again with some smart
        # heuristic here? Anyway, it cannot be done with file://
        # urls since path encoding is os/fs dependent (see
        # urllib.pathname2url() for details).
        urlpath = quotepath(urlpath)
    host, port, user, passwd = netlocsplit(netloc)

    # urllib cannot handle URLs with embedded user or passwd
    url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
                               urlpath, query, frag))
    if not user:
        return url, None
    hostport = host
    if port:
        hostport += ':' + port
    # Python < 2.4.3 uses only the netloc to search for a password
    return url, (None, (url, hostport), user, passwd or '')
|
633 | 630 | |
|
634 | 631 | handlerfuncs = [] |
|
635 | 632 | |
|
def opener(ui, authinfo=None):
    '''
    construct an opener suitable for urllib2
    authinfo will be added to the password manager
    '''
    handlers = [httphandler()]
    if has_https:
        handlers.append(httpshandler(ui))

    handlers.append(proxyhandler(ui))

    passmgr = passwordmgr(ui)
    if authinfo is not None:
        passmgr.add_password(*authinfo)
        user, passwd = authinfo[2:4]
        # never echo the password itself, only asterisks of equal length
        ui.debug('http auth: user %s, password %s\n' %
                 (user, passwd and '*' * len(passwd) or 'not set'))

    handlers.extend((httpbasicauthhandler(passmgr),
                     httpdigestauthhandler(passmgr)))
    # externally registered handler factories (see handlerfuncs above)
    handlers.extend([h(ui, passmgr) for h in handlerfuncs])
    opener = urllib2.build_opener(*handlers)

    # 1.0 here is the _protocol_ version
    opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
    opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
    return opener
|
663 | 660 | |
|
scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

def open(ui, url, data=None):
    """Open ``url`` (or a local path) and return the response object.

    An argument without a scheme is treated as a local file path and
    rewritten to a file:// url; for other urls any embedded credentials
    are moved into the password manager first.
    """
    authinfo = None
    m = scheme_re.search(url)
    if m is None:
        # no scheme: treat the argument as a local filesystem path
        path = util.normpath(os.path.abspath(url))
        url = 'file://' + urllib.pathname2url(path)
    else:
        url, authinfo = getauthinfo(url)
    return opener(ui, authinfo).open(url, data)
@@ -1,60 +1,87 b'' | |||
|
1 | 1 | $ echo "[extensions]" >> $HGRCPATH |
|
2 | 2 | $ echo "bookmarks=" >> $HGRCPATH |
|
3 | 3 | $ echo "mq=" >> $HGRCPATH |
|
4 | 4 | |
|
5 | 5 | $ hg init |
|
6 | 6 | |
|
7 | 7 | $ echo qqq>qqq.txt |
|
8 | 8 | |
|
9 | 9 | add file |
|
10 | 10 | |
|
11 | 11 | $ hg add |
|
12 | 12 | adding qqq.txt |
|
13 | 13 | |
|
14 | 14 | commit first revision |
|
15 | 15 | |
|
16 | 16 | $ hg ci -m 1 |
|
17 | 17 | |
|
18 | 18 | set bookmark |
|
19 | 19 | |
|
20 | 20 | $ hg book test |
|
21 | 21 | |
|
22 | 22 | $ echo www>>qqq.txt |
|
23 | 23 | |
|
24 | 24 | commit second revision |
|
25 | 25 | |
|
26 | 26 | $ hg ci -m 2 |
|
27 | 27 | |
|
28 | 28 | set bookmark |
|
29 | 29 | |
|
30 | 30 | $ hg book test2 |
|
31 | 31 | |
|
32 | 32 | update to -2 |
|
33 | 33 | |
|
34 | 34 | $ hg update -r -2 |
|
35 | 35 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
36 | 36 | |
|
37 | 37 | $ echo eee>>qqq.txt |
|
38 | 38 | |
|
39 | 39 | commit new head |
|
40 | 40 | |
|
41 | 41 | $ hg ci -m 3 |
|
42 | 42 | created new head |
|
43 | 43 | |
|
44 | 44 | bookmarks updated? |
|
45 | 45 | |
|
46 | 46 | $ hg book |
|
47 | 47 | test 1:25e1ee7a0081 |
|
48 | 48 | test2 1:25e1ee7a0081 |
|
49 | 49 | |
|
50 | 50 | strip to revision 1 |
|
51 | 51 | |
|
52 | 52 | $ hg strip 1 |
|
53 | 53 | saved backup bundle to * (glob) |
|
54 | 54 | |
|
55 | 55 | list bookmarks |
|
56 | 56 | |
|
57 | 57 | $ hg book |
|
58 | 58 | * test 1:8cf31af87a2b |
|
59 | 59 | * test2 1:8cf31af87a2b |
|
60 | 60 | |
|
61 | immediate rollback and reentrancy issue | |
|
62 | ||
|
63 | $ echo "mq=!" >> $HGRCPATH | |
|
64 | $ hg init repo | |
|
65 | $ cd repo | |
|
66 | $ echo a > a | |
|
67 | $ hg ci -Am adda | |
|
68 | adding a | |
|
69 | $ echo b > b | |
|
70 | $ hg ci -Am addb | |
|
71 | adding b | |
|
72 | $ hg bookmarks markb | |
|
73 | $ hg rollback | |
|
74 | rolling back to revision 0 (undo commit) | |
|
75 | ||
|
76 | verify the bookmark was removed by the rollback |
|
77 | ||
|
78 | $ hg bookmarks | |
|
79 | no bookmarks set | |
|
80 | ||
|
81 | verify the bookmark can be set again after the rollback |
|
82 | ||
|
83 | $ hg bookmarks markb | |
|
84 | $ hg bookmarks | |
|
85 | * markb 0:07f494440405 | |
|
86 | $ cd .. | |
|
87 |
@@ -1,80 +1,83 b'' | |||
|
1 | 1 | #!/bin/sh |
|
2 | 2 | |
|
3 | 3 | "$TESTDIR/hghave" darcs || exit 80 |
|
4 | 4 | |
|
5 | 5 | echo "[extensions]" >> $HGRCPATH |
|
6 | 6 | echo "convert=" >> $HGRCPATH |
|
7 | 7 | echo 'graphlog =' >> $HGRCPATH |
|
8 | 8 | |
|
9 | 9 | DARCS_EMAIL='test@example.org'; export DARCS_EMAIL |
|
10 | 10 | HOME=`pwd`/do_not_use_HOME_darcs; export HOME |
|
11 | 11 | |
|
12 | 12 | # skip if we can't import elementtree |
|
13 | 13 | mkdir dummy |
|
14 | 14 | mkdir dummy/_darcs |
|
15 | 15 | if hg convert dummy 2>&1 | grep ElementTree > /dev/null; then |
|
16 | 16 | echo 'skipped: missing feature: elementtree module' |
|
17 | 17 | exit 80 |
|
18 | 18 | fi |
|
19 | 19 | |
|
20 | echo '% try converting darcs1 repository' | |
|
21 | hg convert -s darcs "$TESTDIR/darcs/darcs1" 2>&1 | grep darcs-1.0 | |
|
22 | ||
|
20 | 23 | echo % initialize darcs repo |
|
21 | 24 | mkdir darcs-repo |
|
22 | 25 | cd darcs-repo |
|
23 | 26 | darcs init |
|
24 | 27 | echo a > a |
|
25 | 28 | darcs record -a -l -m p0 |
|
26 | 29 | cd .. |
|
27 | 30 | |
|
28 | 31 | echo % branch and update |
|
29 | 32 | darcs get darcs-repo darcs-clone >/dev/null |
|
30 | 33 | cd darcs-clone |
|
31 | 34 | echo c >> a |
|
32 | 35 | echo c > c |
|
33 | 36 | darcs record -a -l -m p1.1 |
|
34 | 37 | cd .. |
|
35 | 38 | |
|
36 | 39 | echo % update source |
|
37 | 40 | cd darcs-repo |
|
38 | 41 | echo b >> a |
|
39 | 42 | echo b > b |
|
40 | 43 | darcs record -a -l -m p1.2 |
|
41 | 44 | |
|
42 | 45 | echo % merge branch |
|
43 | 46 | darcs pull -a ../darcs-clone |
|
44 | 47 | sleep 1 |
|
45 | 48 | echo e > a |
|
46 | 49 | echo f > f |
|
47 | 50 | mkdir dir |
|
48 | 51 | echo d > dir/d |
|
49 | 52 | echo d > dir/d2 |
|
50 | 53 | darcs record -a -l -m p2 |
|
51 | 54 | |
|
52 | 55 | echo % test file and directory move |
|
53 | 56 | darcs mv f ff |
|
54 | 57 | # Test remove + move |
|
55 | 58 | darcs remove dir/d2 |
|
56 | 59 | rm dir/d2 |
|
57 | 60 | darcs mv dir dir2 |
|
58 | 61 | darcs record -a -l -m p3 |
|
59 | 62 | |
|
60 | 63 | echo % test utf-8 commit message and author |
|
61 | 64 | echo g > g |
|
62 | 65 | # darcs is encoding agnostic, so it takes whatever bytes it's given |
|
63 | 66 | darcs record -a -l -m 'p4: desc ñ' -A 'author ñ' |
|
64 | 67 | |
|
65 | 68 | glog() |
|
66 | 69 | { |
|
67 | 70 | HGENCODING=utf-8 hg glog --template '{rev} "{desc|firstline}" ({author}) files: {files}\n' "$@" |
|
68 | 71 | } |
|
69 | 72 | |
|
70 | 73 | cd .. |
|
71 | 74 | hg convert darcs-repo darcs-repo-hg |
|
72 | 75 | # The converter does not currently handle patch conflicts very well. |
|
73 | 76 | # When they occur, it reverts *all* changes and moves forward, |
|
74 | 77 | # letting the conflict resolving patch fix collisions. |
|
75 | 78 | # Unfortunately, non-conflicting changes, like the addition of the |
|
76 | 79 | # "c" file in p1.1 patch are reverted too. |
|
77 | 80 | # Just to say that manifest not listing "c" here is a bug. |
|
78 | 81 | glog -R darcs-repo-hg |
|
79 | 82 | hg up -q -R darcs-repo-hg |
|
80 | 83 | hg -R darcs-repo-hg manifest --debug |
@@ -1,43 +1,45 b'' | |||
|
1 | % try converting darcs1 repository | |
|
2 | darcs-1.0 repository format is unsupported, please upgrade | |
|
1 | 3 | % initialize darcs repo |
|
2 | 4 | Finished recording patch 'p0' |
|
3 | 5 | % branch and update |
|
4 | 6 | Finished recording patch 'p1.1' |
|
5 | 7 | % update source |
|
6 | 8 | Finished recording patch 'p1.2' |
|
7 | 9 | % merge branch |
|
8 | 10 | Backing up ./a(-darcs-backup0) |
|
9 | 11 | We have conflicts in the following files: |
|
10 | 12 | ./a |
|
11 | 13 | Finished pulling and applying. |
|
12 | 14 | Finished recording patch 'p2' |
|
13 | 15 | % test file and directory move |
|
14 | 16 | Finished recording patch 'p3' |
|
15 | 17 | % test utf-8 commit message and author |
|
16 | 18 | Finished recording patch 'p4: desc ñ' |
|
17 | 19 | initializing destination darcs-repo-hg repository |
|
18 | 20 | scanning source... |
|
19 | 21 | sorting... |
|
20 | 22 | converting... |
|
21 | 23 | 5 p0 |
|
22 | 24 | 4 p1.2 |
|
23 | 25 | 3 p1.1 |
|
24 | 26 | 2 p2 |
|
25 | 27 | 1 p3 |
|
26 | 28 | 0 p4: desc ? |
|
27 | 29 | o 5 "p4: desc ñ" (author ñ) files: g |
|
28 | 30 | | |
|
29 | 31 | o 4 "p3" (test@example.org) files: dir/d dir/d2 dir2/d f ff |
|
30 | 32 | | |
|
31 | 33 | o 3 "p2" (test@example.org) files: a dir/d dir/d2 f |
|
32 | 34 | | |
|
33 | 35 | o 2 "p1.1" (test@example.org) files: |
|
34 | 36 | | |
|
35 | 37 | o 1 "p1.2" (test@example.org) files: a b |
|
36 | 38 | | |
|
37 | 39 | o 0 "p0" (test@example.org) files: a |
|
38 | 40 | |
|
39 | 41 | 7225b30cdf38257d5cc7780772c051b6f33e6d6b 644 a |
|
40 | 42 | 1e88685f5ddec574a34c70af492f95b6debc8741 644 b |
|
41 | 43 | 37406831adc447ec2385014019599dfec953c806 644 dir2/d |
|
42 | 44 | b783a337463792a5c7d548ad85a7d3253c16ba8c 644 ff |
|
43 | 45 | 0973eb1b2ecc4de7fafe7447ce1b7462108b4848 644 g |
General Comments 0
You need to be logged in to leave comments.
Login now