@@ -1,478 +1,478 @@ | |||
|
1 | 1 | # git.py - git support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | from mercurial import ( |
|
13 | 13 | config, |
|
14 | 14 | error, |
|
15 | 15 | node as nodemod, |
|
16 | 16 | ) |
|
17 | 17 | |
|
18 | 18 | from . import ( |
|
19 | 19 | common, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | class submodule(object): |
|
23 | 23 | def __init__(self, path, node, url): |
|
24 | 24 | self.path = path |
|
25 | 25 | self.node = node |
|
26 | 26 | self.url = url |
|
27 | 27 | |
|
28 | 28 | def hgsub(self): |
|
29 | 29 | return "%s = [git]%s" % (self.path, self.url) |
|
30 | 30 | |
|
31 | 31 | def hgsubstate(self): |
|
32 | 32 | return "%s %s" % (self.node, self.path) |
|
33 | 33 | |
|
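Each submodule renders as one line of .hgsub and .hgsubstate respectively; a minimal sketch (hypothetical path, node, and URL):

    s = submodule('lib', '0123456789abcdef0123456789abcdef01234567',
                  'git://example.com/lib.git')
    s.hgsub()       # 'lib = [git]git://example.com/lib.git'
    s.hgsubstate()  # '0123456789abcdef0123456789abcdef01234567 lib'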
34 | 34 | # Keys in extra fields that should not be copied if the user requests. |
|
35 | 35 | bannedextrakeys = { |
|
36 | 36 | # Git commit object built-ins. |
|
37 | 37 | 'tree', |
|
38 | 38 | 'parent', |
|
39 | 39 | 'author', |
|
40 | 40 | 'committer', |
|
41 | 41 | # Mercurial built-ins. |
|
42 | 42 | 'branch', |
|
43 | 43 | 'close', |
|
44 | 44 | } |
|
45 | 45 | |
|
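Keys outside this banned set can be copied from git commit headers into changeset extras; a hedged sketch (the "svn-id" header is hypothetical):

    # hg convert --config convert.git.extrakeys=svn-id gitrepo hgrepo
    # a commit header line 'svn-id 12345' becomes extra['svn-id'] = '12345'
    # (see the copyextrakeys handling in getcommit() below)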
46 | 46 | class convert_git(common.converter_source, common.commandline): |
|
47 | 47 | # Windows does not support GIT_DIR= construct while other systems |
|
48 | 48 | # cannot remove environment variable. Just assume none have |
|
49 | 49 | # both issues. |
|
50 | 50 | |
|
51 | 51 | def _gitcmd(self, cmd, *args, **kwargs): |
|
52 | 52 | return cmd('--git-dir=%s' % self.path, *args, **kwargs) |
|
53 | 53 | |
|
54 | 54 | def gitrun0(self, *args, **kwargs): |
|
55 | 55 | return self._gitcmd(self.run0, *args, **kwargs) |
|
56 | 56 | |
|
57 | 57 | def gitrun(self, *args, **kwargs): |
|
58 | 58 | return self._gitcmd(self.run, *args, **kwargs) |
|
59 | 59 | |
|
60 | 60 | def gitrunlines0(self, *args, **kwargs): |
|
61 | 61 | return self._gitcmd(self.runlines0, *args, **kwargs) |
|
62 | 62 | |
|
63 | 63 | def gitrunlines(self, *args, **kwargs): |
|
64 | 64 | return self._gitcmd(self.runlines, *args, **kwargs) |
|
65 | 65 | |
|
66 | 66 | def gitpipe(self, *args, **kwargs): |
|
67 | 67 | return self._gitcmd(self._run3, *args, **kwargs) |
|
68 | 68 | |
|
69 | 69 | def __init__(self, ui, repotype, path, revs=None): |
|
70 | 70 | super(convert_git, self).__init__(ui, repotype, path, revs=revs) |
|
71 | 71 | common.commandline.__init__(self, ui, 'git') |
|
72 | 72 | |
|
73 | 73 | # Pass an absolute path to git to prevent from ever being interpreted |
|
74 | 74 | # as a URL |
|
75 | 75 | path = os.path.abspath(path) |
|
76 | 76 | |
|
77 | 77 | if os.path.isdir(path + "/.git"): |
|
78 | 78 | path += "/.git" |
|
79 | 79 | if not os.path.exists(path + "/objects"): |
|
80 | 80 | raise common.NoRepo(_("%s does not look like a Git repository") % |
|
81 | 81 | path) |
|
82 | 82 | |
|
83 | 83 | # The default value (50) is based on the default for 'git diff'. |
|
84 | 84 | similarity = ui.configint('convert', 'git.similarity') |
|
85 | 85 | if similarity < 0 or similarity > 100: |
|
86 | 86 | raise error.Abort(_('similarity must be between 0 and 100')) |
|
87 | 87 | if similarity > 0: |
|
88 | 88 | self.simopt = ['-C%d%%' % similarity] |
|
89 | 89 | findcopiesharder = ui.configbool('convert', 'git.findcopiesharder') |
|
90 | 90 | if findcopiesharder: |
|
91 | 91 | self.simopt.append('--find-copies-harder') |
|
92 | 92 | |
|
93 | 93 | renamelimit = ui.configint('convert', 'git.renamelimit') |
|
94 | 94 | self.simopt.append('-l%d' % renamelimit) |
|
95 | 95 | else: |
|
96 | 96 | self.simopt = [] |
|
97 | 97 | |
|
98 | 98 | common.checktool('git', 'git') |
|
99 | 99 | |
|
100 | 100 | self.path = path |
|
101 | 101 | self.submodules = [] |
|
102 | 102 | |
|
103 | 103 | self.catfilepipe = self.gitpipe('cat-file', '--batch') |
|
104 | 104 | |
|
105 | 105 | self.copyextrakeys = self.ui.configlist('convert', 'git.extrakeys') |
|
106 | 106 | banned = set(self.copyextrakeys) & bannedextrakeys |
|
107 | 107 | if banned: |
|
108 | 108 | raise error.Abort(_('copying of extra key is forbidden: %s') % |
|
109 | 109 | _(', ').join(sorted(banned))) |
|
110 | 110 | |
|
111 | 111 | committeractions = self.ui.configlist('convert', 'git.committeractions') |
|
112 | 112 | |
|
113 | 113 | messagedifferent = None |
|
114 | 114 | messagealways = None |
|
115 | 115 | for a in committeractions: |
|
116 | 116 | if a.startswith(('messagedifferent', 'messagealways')): |
|
117 | 117 | k = a |
|
118 | 118 | v = None |
|
119 | 119 | if '=' in a: |
|
120 | 120 | k, v = a.split('=', 1) |
|
121 | 121 | |
|
122 | 122 | if k == 'messagedifferent': |
|
123 | 123 | messagedifferent = v or 'committer:' |
|
124 | 124 | elif k == 'messagealways': |
|
125 | 125 | messagealways = v or 'committer:' |
|
126 | 126 | |
|
127 | 127 | if messagedifferent and messagealways: |
|
128 | 128 | raise error.Abort(_('committeractions cannot define both ' |
|
129 | 129 | 'messagedifferent and messagealways')) |
|
130 | 130 | |
|
131 | 131 | dropcommitter = 'dropcommitter' in committeractions |
|
132 | 132 | replaceauthor = 'replaceauthor' in committeractions |
|
133 | 133 | |
|
134 | 134 | if dropcommitter and replaceauthor: |
|
135 | 135 | raise error.Abort(_('committeractions cannot define both ' |
|
136 | 136 | 'dropcommitter and replaceauthor')) |
|
137 | 137 | |
|
138 | 138 | if dropcommitter and messagealways: |
|
139 | 139 | raise error.Abort(_('committeractions cannot define both ' |
|
140 | 140 | 'dropcommitter and messagealways')) |
|
141 | 141 | |
|
142 | 142 | if not messagedifferent and not messagealways: |
|
143 | 143 | messagedifferent = 'committer:' |
|
144 | 144 | |
|
145 | 145 | self.committeractions = { |
|
146 | 146 | 'dropcommitter': dropcommitter, |
|
147 | 147 | 'replaceauthor': replaceauthor, |
|
148 | 148 | 'messagedifferent': messagedifferent, |
|
149 | 149 | 'messagealways': messagealways, |
|
150 | 150 | } |
|
151 | 151 | |
|
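The committeractions parsing above boils down to a few mutually exclusive behaviors; a hedged hgrc sketch (values are illustrative):

    [convert]
    # append "committer: NAME <EMAIL>" to the description only when the
    # committer differs from the author (also the default behavior):
    git.committeractions = messagedifferent
    # or always record the committer, under a custom prefix:
    # git.committeractions = messagealways=Committed-by: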
152 | 152 | def after(self): |
|
153 | 153 | for f in self.catfilepipe: |
|
154 | 154 | f.close() |
|
155 | 155 | |
|
156 | 156 | def getheads(self): |
|
157 | 157 | if not self.revs: |
|
158 | 158 | output, status = self.gitrun('rev-parse', '--branches', '--remotes') |
|
159 | 159 | heads = output.splitlines() |
|
160 | 160 | if status: |
|
161 | 161 | raise error.Abort(_('cannot retrieve git heads')) |
|
162 | 162 | else: |
|
163 | 163 | heads = [] |
|
164 | 164 | for rev in self.revs: |
|
165 | 165 | rawhead, ret = self.gitrun('rev-parse', '--verify', rev) |
|
166 | 166 | heads.append(rawhead[:-1]) |
|
167 | 167 | if ret: |
|
168 | 168 | raise error.Abort(_('cannot retrieve git head "%s"') % rev) |
|
169 | 169 | return heads |
|
170 | 170 | |
|
171 | 171 | def catfile(self, rev, ftype): |
|
172 | 172 | if rev == nodemod.nullhex: |
|
173 | 173 | raise IOError |
|
174 | 174 | self.catfilepipe[0].write(rev+'\n') |
|
175 | 175 | self.catfilepipe[0].flush() |
|
176 | 176 | info = self.catfilepipe[1].readline().split() |
|
177 | 177 | if info[1] != ftype: |
|
178 | 178 | raise error.Abort(_('cannot read %r object at %s') % (ftype, rev)) |
|
179 | 179 | size = int(info[2]) |
|
180 | 180 | data = self.catfilepipe[1].read(size) |
|
181 | 181 | if len(data) < size: |
|
182 | 182 | raise error.Abort(_('cannot read %r object at %s: unexpected size') |
|
183 | 183 | % (ftype, rev)) |
|
184 | 184 | # read the trailing newline |
|
185 | 185 | self.catfilepipe[1].read(1) |
|
186 | 186 | return data |
|
187 | 187 | |
|
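catfile() speaks the `git cat-file --batch` protocol: each sha written to the pipe is answered with a "<sha> <type> <size>" header, <size> bytes of content, and one trailing newline, which the final read(1) consumes. A sketch with a hypothetical blob:

    # stdin:  'ce013625030ba8dba906f756967f9e9ca394464a\n'
    # stdout: 'ce013625030ba8dba906f756967f9e9ca394464a blob 6\n'
    #         'hello\n'  (the 6 content bytes)
    #         '\n'       (trailing newline eaten by read(1))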
188 | 188 | def getfile(self, name, rev): |
|
189 | 189 | if rev == nodemod.nullhex: |
|
190 | 190 | return None, None |
|
191 | 191 | if name == '.hgsub': |
|
192 | 192 | data = '\n'.join([m.hgsub() for m in self.submoditer()]) |
|
193 | 193 | mode = '' |
|
194 | 194 | elif name == '.hgsubstate': |
|
195 | 195 | data = '\n'.join([m.hgsubstate() for m in self.submoditer()]) |
|
196 | 196 | mode = '' |
|
197 | 197 | else: |
|
198 | 198 | data = self.catfile(rev, "blob") |
|
199 | 199 | mode = self.modecache[(name, rev)] |
|
200 | 200 | return data, mode |
|
201 | 201 | |
|
202 | 202 | def submoditer(self): |
|
203 | 203 | null = nodemod.nullhex |
|
204 | 204 | for m in sorted(self.submodules, key=lambda p: p.path): |
|
205 | 205 | if m.node != null: |
|
206 | 206 | yield m |
|
207 | 207 | |
|
208 | 208 | def parsegitmodules(self, content): |
|
209 | 209 | """Parse the formatted .gitmodules file, example file format: |
|
210 | 210 | [submodule "sub"]\n |
|
211 | 211 | \tpath = sub\n |
|
212 | 212 | \turl = git://giturl\n |
|
213 | 213 | """ |
|
214 | 214 | self.submodules = [] |
|
215 | 215 | c = config.config() |
|
216 | 216 | # Each item in .gitmodules starts with whitespace that can't be parsed |
|
217 | 217 | c.parse('.gitmodules', '\n'.join(line.strip() for line in |
|
218 | 218 | content.split('\n'))) |
|
219 | 219 | for sec in c.sections(): |
|
220 | 220 | s = c[sec] |
|
221 | 221 | if 'url' in s and 'path' in s: |
|
222 | 222 | self.submodules.append(submodule(s['path'], '', s['url'])) |
|
223 | 223 | |
|
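A sketch of what parsegitmodules() extracts from a .gitmodules blob (hypothetical content):

    content = '[submodule "sub"]\n\tpath = sub\n\turl = git://example.com/sub\n'
    # after self.parsegitmodules(content):
    #   self.submodules == [submodule('sub', '', 'git://example.com/sub')]
    # the empty node is filled in afterwards by retrievegitmodules()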
224 | 224 | def retrievegitmodules(self, version): |
|
225 | 225 | modules, ret = self.gitrun('show', '%s:%s' % (version, '.gitmodules')) |
|
226 | 226 | if ret: |
|
227 | 227 | # This can happen if a file is in the repo that has permissions |
|
228 | 228 | # 160000, but there is no .gitmodules file. |
|
229 | 229 | self.ui.warn(_("warning: cannot read submodules config file in " |
|
230 | 230 | "%s\n") % version) |
|
231 | 231 | return |
|
232 | 232 | |
|
233 | 233 | try: |
|
234 | 234 | self.parsegitmodules(modules) |
|
235 | 235 | except error.ParseError: |
|
236 | 236 | self.ui.warn(_("warning: unable to parse .gitmodules in %s\n") |
|
237 | 237 | % version) |
|
238 | 238 | return |
|
239 | 239 | |
|
240 | 240 | for m in self.submodules: |
|
241 | 241 | node, ret = self.gitrun('rev-parse', '%s:%s' % (version, m.path)) |
|
242 | 242 | if ret: |
|
243 | 243 | continue |
|
244 | 244 | m.node = node.strip() |
|
245 | 245 | |
|
246 | 246 | def getchanges(self, version, full): |
|
247 | 247 | if full: |
|
248 | 248 | raise error.Abort(_("convert from git does not support --full")) |
|
249 | 249 | self.modecache = {} |
|
250 | 250 | cmd = ['diff-tree','-z', '--root', '-m', '-r'] + self.simopt + [version] |
|
251 | 251 | output, status = self.gitrun(*cmd) |
|
252 | 252 | if status: |
|
253 | 253 | raise error.Abort(_('cannot read changes in %s') % version) |
|
254 | 254 | changes = [] |
|
255 | 255 | copies = {} |
|
256 | 256 | seen = set() |
|
257 | 257 | entry = None |
|
258 | 258 | subexists = [False] |
|
259 | 259 | subdeleted = [False] |
|
260 | 260 | difftree = output.split('\x00') |
|
261 | 261 | lcount = len(difftree) |
|
262 | 262 | i = 0 |
|
263 | 263 | |
|
264 | 264 | skipsubmodules = self.ui.configbool('convert', 'git.skipsubmodules') |
|
265 | 265 | def add(entry, f, isdest): |
|
266 | 266 | seen.add(f) |
|
267 | 267 | h = entry[3] |
|
268 | 268 | p = (entry[1] == "100755") |
|
269 | 269 | s = (entry[1] == "120000") |
|
270 | 270 | renamesource = (not isdest and entry[4][0] == 'R') |
|
271 | 271 | |
|
272 | 272 | if f == '.gitmodules': |
|
273 | 273 | if skipsubmodules: |
|
274 | 274 | return |
|
275 | 275 | |
|
276 | 276 | subexists[0] = True |
|
277 | 277 | if entry[4] == 'D' or renamesource: |
|
278 | 278 | subdeleted[0] = True |
|
279 | 279 | changes.append(('.hgsub', nodemod.nullhex)) |
|
280 | 280 | else: |
|
281 | 281 | changes.append(('.hgsub', '')) |
|
282 | 282 | elif entry[1] == '160000' or entry[0] == ':160000': |
|
283 | 283 | if not skipsubmodules: |
|
284 | 284 | subexists[0] = True |
|
285 | 285 | else: |
|
286 | 286 | if renamesource: |
|
287 | 287 | h = nodemod.nullhex |
|
288 | 288 | self.modecache[(f, h)] = (p and "x") or (s and "l") or "" |
|
289 | 289 | changes.append((f, h)) |
|
290 | 290 | |
|
291 | 291 | while i < lcount: |
|
292 | 292 | l = difftree[i] |
|
293 | 293 | i += 1 |
|
294 | 294 | if not entry: |
|
295 | 295 | if not l.startswith(':'): |
|
296 | 296 | continue |
|
297 | 297 | entry = l.split() |
|
298 | 298 | continue |
|
299 | 299 | f = l |
|
300 | 300 | if entry[4][0] == 'C': |
|
301 | 301 | copysrc = f |
|
302 | 302 | copydest = difftree[i] |
|
303 | 303 | i += 1 |
|
304 | 304 | f = copydest |
|
305 | 305 | copies[copydest] = copysrc |
|
306 | 306 | if f not in seen: |
|
307 | 307 | add(entry, f, False) |
|
308 | 308 | # A file can be copied multiple times, or modified and copied |
|
309 | 309 | # simultaneously. So f can be repeated even if fdest isn't. |
|
310 | 310 | if entry[4][0] == 'R': |
|
311 | 311 | # rename: next line is the destination |
|
312 | 312 | fdest = difftree[i] |
|
313 | 313 | i += 1 |
|
314 | 314 | if fdest not in seen: |
|
315 | 315 | add(entry, fdest, True) |
|
316 | 316 | # .gitmodules isn't imported at all, so it being copied to |
|
317 | 317 | # and fro doesn't really make sense |
|
318 | 318 | if f != '.gitmodules' and fdest != '.gitmodules': |
|
319 | 319 | copies[fdest] = f |
|
320 | 320 | entry = None |
|
321 | 321 | |
|
322 | 322 | if subexists[0]: |
|
323 | 323 | if subdeleted[0]: |
|
324 | 324 | changes.append(('.hgsubstate', nodemod.nullhex)) |
|
325 | 325 | else: |
|
326 | 326 | self.retrievegitmodules(version) |
|
327 | 327 | changes.append(('.hgsubstate', '')) |
|
328 | 328 | return (changes, copies, set()) |
|
329 | 329 | |
|
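getchanges() walks NUL-separated `git diff-tree -z` records: a colon-prefixed metadata entry followed by one path, or two paths for copies and renames. A sketch of one rename record (hypothetical hashes):

    # ':100644 100644 <srcsha> <dstsha> R100\x00old.txt\x00new.txt\x00'
    # entry == [':100644', '100644', '<srcsha>', '<dstsha>', 'R100']
    # add(entry, 'old.txt', False)  -> source recorded as deleted (h = nullhex)
    # add(entry, 'new.txt', True)   -> destination added; copies['new.txt'] = 'old.txt'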
330 | 330 | def getcommit(self, version): |
|
331 | 331 | c = self.catfile(version, "commit") # read the commit hash |
|
332 | 332 | end = c.find("\n\n") |
|
333 | 333 | message = c[end + 2:] |
|
334 | 334 | message = self.recode(message) |
|
335 | 335 | l = c[:end].splitlines() |
|
336 | 336 | parents = [] |
|
337 | 337 | author = committer = None |
|
338 | 338 | extra = {} |
|
339 | 339 | for e in l[1:]: |
|
340 | 340 | n, v = e.split(" ", 1) |
|
341 | 341 | if n == "author": |
|
342 | 342 | p = v.split() |
|
343 | 343 | tm, tz = p[-2:] |
|
344 | 344 | author = " ".join(p[:-2]) |
|
345 | 345 | if author[0] == "<": |
|
346 | 346 | author = author[1:-1] |
|
347 | 347 | author = self.recode(author) |
|
348 | 348 | if n == "committer": |
|
349 | 349 | p = v.split() |
|
350 | 350 | tm, tz = p[-2:] |
|
351 | 351 | committer = " ".join(p[:-2]) |
|
352 | 352 | if committer[0] == "<": |
|
353 | 353 | committer = committer[1:-1] |
|
354 | 354 | committer = self.recode(committer) |
|
355 | 355 | if n == "parent": |
|
356 | 356 | parents.append(v) |
|
357 | 357 | if n in self.copyextrakeys: |
|
358 | 358 | extra[n] = v |
|
359 | 359 | |
|
360 | 360 | if self.committeractions['dropcommitter']: |
|
361 | 361 | committer = None |
|
362 | 362 | elif self.committeractions['replaceauthor']: |
|
363 | 363 | author = committer |
|
364 | 364 | |
|
365 | 365 | if committer: |
|
366 | 366 | messagealways = self.committeractions['messagealways'] |
|
367 | 367 | messagedifferent = self.committeractions['messagedifferent'] |
|
368 | 368 | if messagealways: |
|
369 | 369 | message += '\n%s %s\n' % (messagealways, committer) |
|
370 | 370 | elif messagedifferent and author != committer: |
|
371 | 371 | message += '\n%s %s\n' % (messagedifferent, committer) |
|
372 | 372 | |
|
373 | 373 | tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:] |
|
374 | 374 | tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) |
|
375 | 375 | date = tm + " " + str(tz) |
|
376 | 376 | saverev = self.ui.configbool('convert', 'git.saverev') |
|
377 | 377 | |
|
378 | 378 | c = common.commit(parents=parents, date=date, author=author, |
|
379 | 379 | desc=message, |
|
380 | 380 | rev=version, |
|
381 | 381 | extra=extra, |
|
382 | 382 | saverev=saverev) |
|
383 | 383 | return c |
|
384 | 384 | |
|
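The tail of getcommit() converts git's "+HHMM"/"-HHMM" timezone into Mercurial's seconds-west-of-UTC offset; a worked example with a whole-hour zone (hypothetical timestamp):

    # author/committer line ends '... 1514764800 -0500'
    # tzs = '-1', tzh = '05', tzm = '00'
    # tz  = -(-1) * (5 * 3600 + 0) = 18000
    # date = '1514764800 18000'  (five hours west of UTC)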
385 | 385 | def numcommits(self): |
|
386 | 386 | output, ret = self.gitrunlines('rev-list', '--all') |
|
387 | 387 | if ret: |
|
388 | 388 | raise error.Abort(_('cannot retrieve number of commits in %s') \ |
|
389 | 389 | % self.path) |
|
390 | 390 | return len(output) |
|
391 | 391 | |
|
392 | 392 | def gettags(self): |
|
393 | 393 | tags = {} |
|
394 | 394 | alltags = {} |
|
395 | 395 | output, status = self.gitrunlines('ls-remote', '--tags', self.path) |
|
396 | 396 | |
|
397 | 397 | if status: |
|
398 | 398 | raise error.Abort(_('cannot read tags from %s') % self.path) |
|
399 | 399 | prefix = 'refs/tags/' |
|
400 | 400 | |
|
401 | 401 | # Build complete list of tags, both annotated and bare ones |
|
402 | 402 | for line in output: |
|
403 | 403 | line = line.strip() |
|
404 | 404 | if line.startswith("error:") or line.startswith("fatal:"): |
|
405 | 405 | raise error.Abort(_('cannot read tags from %s') % self.path) |
|
406 | 406 | node, tag = line.split(None, 1) |
|
407 | 407 | if not tag.startswith(prefix): |
|
408 | 408 | continue |
|
409 | 409 | alltags[tag[len(prefix):]] = node |
|
410 | 410 | |
|
411 | 411 | # Filter out tag objects for annotated tag refs |
|
412 | 412 | for tag in alltags: |
|
413 | 413 | if tag.endswith('^{}'): |
|
414 | 414 | tags[tag[:-3]] = alltags[tag] |
|
415 | 415 | else: |
|
416 | 416 | if tag + '^{}' in alltags: |
|
417 | 417 | continue |
|
418 | 418 | else: |
|
419 | 419 | tags[tag] = alltags[tag] |
|
420 | 420 | |
|
421 | 421 | return tags |
|
422 | 422 | |
|
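`ls-remote --tags` lists each annotated tag twice, once as the tag object and once peeled with a `^{}` suffix; the loop above prefers the peeled commit. A sketch (hypothetical nodes):

    # aaaa refs/tags/v1.0      (annotated tag object)
    # bbbb refs/tags/v1.0^{}   (peeled: the tagged commit)
    # cccc refs/tags/v1.1      (lightweight tag)
    # result: tags == {'v1.0': 'bbbb', 'v1.1': 'cccc'}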
423 | 423 | def getchangedfiles(self, version, i): |
|
424 | 424 | changes = [] |
|
425 | 425 | if i is None: |
|
426 | 426 | output, status = self.gitrunlines('diff-tree', '--root', '-m', |
|
427 | 427 | '-r', version) |
|
428 | 428 | if status: |
|
429 | 429 | raise error.Abort(_('cannot read changes in %s') % version) |
|
430 | 430 | for l in output: |
|
431 | 431 | if "\t" not in l: |
|
432 | 432 | continue |
|
433 | 433 | m, f = l[:-1].split("\t") |
|
434 | 434 | changes.append(f) |
|
435 | 435 | else: |
|
436 | 436 | output, status = self.gitrunlines('diff-tree', '--name-only', |
|
437 | 437 | '--root', '-r', version, |
|
438 | 438 | '%s^%d' % (version, i + 1), '--') |
|
439 | 439 | if status: |
|
440 | 440 | raise error.Abort(_('cannot read changes in %s') % version) |
|
441 | 441 | changes = [f.rstrip('\n') for f in output] |
|
442 | 442 | |
|
443 | 443 | return changes |
|
444 | 444 | |
|
445 | 445 | def getbookmarks(self): |
|
446 | 446 | bookmarks = {} |
|
447 | 447 | |
|
448 | 448 | # Handle local and remote branches |
|
449 | 449 | remoteprefix = self.ui.config('convert', 'git.remoteprefix') |
|
450 | 450 | reftypes = [ |
|
451 | 451 | # (git prefix, hg prefix) |
|
452 | 452 | ('refs/remotes/origin/', remoteprefix + '/'), |
|
453 | 453 | ('refs/heads/', '') |
|
454 | 454 | ] |
|
455 | 455 | |
|
456 | 456 | exclude = { |
|
457 | 457 | 'refs/remotes/origin/HEAD', |
|
458 | 458 | } |
|
459 | 459 | |
|
460 | 460 | try: |
|
461 | 461 | output, status = self.gitrunlines('show-ref') |
|
462 | 462 | for line in output: |
|
463 | 463 | line = line.strip() |
|
464 | 464 | rev, name = line.split(None, 1) |
|
465 | 465 | # Process each type of branch |
|
466 | 466 | for gitprefix, hgprefix in reftypes: |
|
467 | 467 | if not name.startswith(gitprefix) or name in exclude: |
|
468 | 468 | continue |
|
469 | 469 | name = '%s%s' % (hgprefix, name[len(gitprefix):]) |
|
470 | 470 | bookmarks[name] = rev |
|
471 | 471 | except Exception: |
|
472 | 472 | pass |
|
473 | 473 | |
|
474 | 474 | return bookmarks |
|
475 | 475 | |
|
476 | 476 | def checkrevformat(self, revstr, mapname='splicemap'): |
|
477 | 477 | """ git revision string is a 40 byte hex """ |
|
478 | 478 | self.checkhexformat(revstr, mapname) |
@@ -1,647 +1,647 @@ | |||
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will cause the source |

16 | 16 | # identifier to be stored in the converted revision. This will give |

17 | 17 | # the converted revision a different identity than the |

18 | 18 | # source. |
|
19 | 19 | from __future__ import absolute_import |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import time |
|
24 | 24 | |
|
25 | 25 | from mercurial.i18n import _ |
|
26 | 26 | from mercurial import ( |
|
27 | 27 | bookmarks, |
|
28 | 28 | context, |
|
29 | 29 | error, |
|
30 | 30 | exchange, |
|
31 | 31 | hg, |
|
32 | 32 | lock as lockmod, |
|
33 | 33 | merge as mergemod, |
|
34 | 34 | node as nodemod, |
|
35 | 35 | phases, |
|
36 | 36 | scmutil, |
|
37 | 37 | util, |
|
38 | 38 | ) |
|
39 | 39 | from mercurial.utils import dateutil |
|
40 | 40 | stringio = util.stringio |
|
41 | 41 | |
|
42 | 42 | from . import common |
|
43 | 43 | mapfile = common.mapfile |
|
44 | 44 | NoRepo = common.NoRepo |
|
45 | 45 | |
|
46 | 46 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
47 | 47 | |
|
48 | 48 | class mercurial_sink(common.converter_sink): |
|
49 | 49 | def __init__(self, ui, repotype, path): |
|
50 | 50 | common.converter_sink.__init__(self, ui, repotype, path) |
|
51 | 51 | self.branchnames = ui.configbool('convert', 'hg.usebranchnames') |
|
52 | 52 | self.clonebranches = ui.configbool('convert', 'hg.clonebranches') |
|
53 | 53 | self.tagsbranch = ui.config('convert', 'hg.tagsbranch') |
|
54 | 54 | self.lastbranch = None |
|
55 | 55 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
56 | 56 | try: |
|
57 | 57 | self.repo = hg.repository(self.ui, path) |
|
58 | 58 | if not self.repo.local(): |
|
59 | 59 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
60 | 60 | % path) |
|
61 | 61 | except error.RepoError as err: |
|
62 | 62 | ui.traceback() |
|
63 | 63 | raise NoRepo(err.args[0]) |
|
64 | 64 | else: |
|
65 | 65 | try: |
|
66 | 66 | ui.status(_('initializing destination %s repository\n') % path) |
|
67 | 67 | self.repo = hg.repository(self.ui, path, create=True) |
|
68 | 68 | if not self.repo.local(): |
|
69 | 69 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
70 | 70 | % path) |
|
71 | 71 | self.created.append(path) |
|
72 | 72 | except error.RepoError: |
|
73 | 73 | ui.traceback() |
|
74 | 74 | raise NoRepo(_("could not create hg repository %s as sink") |
|
75 | 75 | % path) |
|
76 | 76 | self.lock = None |
|
77 | 77 | self.wlock = None |
|
78 | 78 | self.filemapmode = False |
|
79 | 79 | self.subrevmaps = {} |
|
80 | 80 | |
|
81 | 81 | def before(self): |
|
82 | 82 | self.ui.debug('run hg sink pre-conversion action\n') |
|
83 | 83 | self.wlock = self.repo.wlock() |
|
84 | 84 | self.lock = self.repo.lock() |
|
85 | 85 | |
|
86 | 86 | def after(self): |
|
87 | 87 | self.ui.debug('run hg sink post-conversion action\n') |
|
88 | 88 | if self.lock: |
|
89 | 89 | self.lock.release() |
|
90 | 90 | if self.wlock: |
|
91 | 91 | self.wlock.release() |
|
92 | 92 | |
|
93 | 93 | def revmapfile(self): |
|
94 | 94 | return self.repo.vfs.join("shamap") |
|
95 | 95 | |
|
96 | 96 | def authorfile(self): |
|
97 | 97 | return self.repo.vfs.join("authormap") |
|
98 | 98 | |
|
99 | 99 | def setbranch(self, branch, pbranches): |
|
100 | 100 | if not self.clonebranches: |
|
101 | 101 | return |
|
102 | 102 | |
|
103 | 103 | setbranch = (branch != self.lastbranch) |
|
104 | 104 | self.lastbranch = branch |
|
105 | 105 | if not branch: |
|
106 | 106 | branch = 'default' |
|
107 | 107 | pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches] |
|
108 | 108 | if pbranches: |
|
109 | 109 | pbranch = pbranches[0][1] |
|
110 | 110 | else: |
|
111 | 111 | pbranch = 'default' |
|
112 | 112 | |
|
113 | 113 | branchpath = os.path.join(self.path, branch) |
|
114 | 114 | if setbranch: |
|
115 | 115 | self.after() |
|
116 | 116 | try: |
|
117 | 117 | self.repo = hg.repository(self.ui, branchpath) |
|
118 | 118 | except Exception: |
|
119 | 119 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
120 | 120 | self.before() |
|
121 | 121 | |
|
122 | 122 | # pbranches may bring revisions from other branches (merge parents) |
|
123 | 123 | # Make sure we have them, or pull them. |
|
124 | 124 | missings = {} |
|
125 | 125 | for b in pbranches: |
|
126 | 126 | try: |
|
127 | 127 | self.repo.lookup(b[0]) |
|
128 | 128 | except Exception: |
|
129 | 129 | missings.setdefault(b[1], []).append(b[0]) |
|
130 | 130 | |
|
131 | 131 | if missings: |
|
132 | 132 | self.after() |
|
133 | 133 | for pbranch, heads in sorted(missings.iteritems()): |
|
134 | 134 | pbranchpath = os.path.join(self.path, pbranch) |
|
135 | 135 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
136 | 136 | self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch)) |
|
137 | 137 | exchange.pull(self.repo, prepo, |
|
138 | 138 | [prepo.lookup(h) for h in heads]) |
|
139 | 139 | self.before() |
|
140 | 140 | |
|
141 | 141 | def _rewritetags(self, source, revmap, data): |
|
142 | 142 | fp = stringio() |
|
143 | 143 | for line in data.splitlines(): |
|
144 | 144 | s = line.split(' ', 1) |
|
145 | 145 | if len(s) != 2: |
|
146 | 146 | continue |
|
147 | 147 | revid = revmap.get(source.lookuprev(s[0])) |
|
148 | 148 | if not revid: |
|
149 | 149 | if s[0] == nodemod.nullhex: |
|
150 | 150 | revid = s[0] |
|
151 | 151 | else: |
|
152 | 152 | continue |
|
153 | 153 | fp.write('%s %s\n' % (revid, s[1])) |
|
154 | 154 | return fp.getvalue() |
|
155 | 155 | |
|
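_rewritetags() maps every .hgtags line through the conversion revmap; a sketch (hypothetical ids):

    # input line:  'aaaa v1.0'   with revmap == {'aaaa': 'bbbb'}
    # output line: 'bbbb v1.0'
    # lines whose source revision is absent from revmap are dropped, except
    # nullhex entries (tag deletions), which pass through unchanged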
156 | 156 | def _rewritesubstate(self, source, data): |
|
157 | 157 | fp = stringio() |
|
158 | 158 | for line in data.splitlines(): |
|
159 | 159 | s = line.split(' ', 1) |
|
160 | 160 | if len(s) != 2: |
|
161 | 161 | continue |
|
162 | 162 | |
|
163 | 163 | revid = s[0] |
|
164 | 164 | subpath = s[1] |
|
165 | 165 | if revid != nodemod.nullhex: |
|
166 | 166 | revmap = self.subrevmaps.get(subpath) |
|
167 | 167 | if revmap is None: |
|
168 | 168 | revmap = mapfile(self.ui, |
|
169 | 169 | self.repo.wjoin(subpath, '.hg/shamap')) |
|
170 | 170 | self.subrevmaps[subpath] = revmap |
|
171 | 171 | |
|
172 | 172 | # It is reasonable that one or more of the subrepos don't |
|
173 | 173 | # need to be converted, in which case they can be cloned |
|
174 | 174 | # into place instead of converted. Therefore, only warn |
|
175 | 175 | # once. |
|
176 | 176 | msg = _('no ".hgsubstate" updates will be made for "%s"\n') |
|
177 | 177 | if len(revmap) == 0: |
|
178 | 178 | sub = self.repo.wvfs.reljoin(subpath, '.hg') |
|
179 | 179 | |
|
180 | 180 | if self.repo.wvfs.exists(sub): |
|
181 | 181 | self.ui.warn(msg % subpath) |
|
182 | 182 | |
|
183 | 183 | newid = revmap.get(revid) |
|
184 | 184 | if not newid: |
|
185 | 185 | if len(revmap) > 0: |
|
186 | 186 | self.ui.warn(_("%s is missing from %s/.hg/shamap\n") % |
|
187 | 187 | (revid, subpath)) |
|
188 | 188 | else: |
|
189 | 189 | revid = newid |
|
190 | 190 | |
|
191 | 191 | fp.write('%s %s\n' % (revid, subpath)) |
|
192 | 192 | |
|
193 | 193 | return fp.getvalue() |
|
194 | 194 | |
|
195 | 195 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
196 | 196 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
197 | 197 | and p2, given that the merge is coming from the given source. |
|
198 | 198 | |
|
199 | 199 | This prevents us from losing files that only exist in the target p2 and |
|
200 | 200 | that don't come from the source repo (like if you're merging multiple |
|
201 | 201 | repositories together). |
|
202 | 202 | """ |
|
203 | 203 | anc = [p1ctx.ancestor(p2ctx)] |
|
204 | 204 | # Calculate what files are coming from p2 |
|
205 | 205 | actions, diverge, rename = mergemod.calculateupdates( |
|
206 | 206 | self.repo, p1ctx, p2ctx, anc, |
|
207 | 207 | True, # branchmerge |
|
208 | 208 | True, # force |
|
209 | 209 | False, # acceptremote |
|
210 | 210 | False, # followcopies |
|
211 | 211 | ) |
|
212 | 212 | |
|
213 | 213 | for file, (action, info, msg) in actions.iteritems(): |
|
214 | 214 | if source.targetfilebelongstosource(file): |
|
215 | 215 | # If the file belongs to the source repo, ignore the p2 |
|
216 | 216 | # since it will be covered by the existing fileset. |
|
217 | 217 | continue |
|
218 | 218 | |
|
219 | 219 | # If the file requires actual merging, abort. We don't have enough |
|
220 | 220 | # context to resolve merges correctly. |
|
221 | 221 | if action in ['m', 'dm', 'cd', 'dc']: |
|
222 | 222 | raise error.Abort(_("unable to convert merge commit " |
|
223 | 223 | "since target parents do not merge cleanly (file " |
|
224 | 224 | "%s, parents %s and %s)") % (file, p1ctx, |
|
225 | 225 | p2ctx)) |
|
226 | 226 | elif action == 'k': |
|
227 | 227 | # 'keep' means nothing changed from p1 |
|
228 | 228 | continue |
|
229 | 229 | else: |
|
230 | 230 | # Any other change means we want to take the p2 version |
|
231 | 231 | yield file |
|
232 | 232 | |
|
233 | 233 | def putcommit(self, files, copies, parents, commit, source, revmap, full, |
|
234 | 234 | cleanp2): |
|
235 | 235 | files = dict(files) |
|
236 | 236 | |
|
237 | 237 | def getfilectx(repo, memctx, f): |
|
238 | 238 | if p2ctx and f in p2files and f not in copies: |
|
239 | 239 | self.ui.debug('reusing %s from p2\n' % f) |
|
240 | 240 | try: |
|
241 | 241 | return p2ctx[f] |
|
242 | 242 | except error.ManifestLookupError: |
|
243 | 243 | # If the file doesn't exist in p2, then we're syncing a |
|
244 | 244 | # delete, so just return None. |
|
245 | 245 | return None |
|
246 | 246 | try: |
|
247 | 247 | v = files[f] |
|
248 | 248 | except KeyError: |
|
249 | 249 | return None |
|
250 | 250 | data, mode = source.getfile(f, v) |
|
251 | 251 | if data is None: |
|
252 | 252 | return None |
|
253 | 253 | if f == '.hgtags': |
|
254 | 254 | data = self._rewritetags(source, revmap, data) |
|
255 | 255 | if f == '.hgsubstate': |
|
256 | 256 | data = self._rewritesubstate(source, data) |
|
257 | 257 | return context.memfilectx(self.repo, memctx, f, data, 'l' in mode, |
|
258 | 258 | 'x' in mode, copies.get(f)) |
|
259 | 259 | |
|
260 | 260 | pl = [] |
|
261 | 261 | for p in parents: |
|
262 | 262 | if p not in pl: |
|
263 | 263 | pl.append(p) |
|
264 | 264 | parents = pl |
|
265 | 265 | nparents = len(parents) |
|
266 | 266 | if self.filemapmode and nparents == 1: |
|
267 | 267 | m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0] |
|
268 | 268 | parent = parents[0] |
|
269 | 269 | |
|
270 | 270 | if len(parents) < 2: |
|
271 | 271 | parents.append(nodemod.nullid) |
|
272 | 272 | if len(parents) < 2: |
|
273 | 273 | parents.append(nodemod.nullid) |
|
274 | 274 | p2 = parents.pop(0) |
|
275 | 275 | |
|
276 | 276 | text = commit.desc |
|
277 | 277 | |
|
278 | 278 | sha1s = re.findall(sha1re, text) |
|
279 | 279 | for sha1 in sha1s: |
|
280 | 280 | oldrev = source.lookuprev(sha1) |
|
281 | 281 | newrev = revmap.get(oldrev) |
|
282 | 282 | if newrev is not None: |
|
283 | 283 | text = text.replace(sha1, newrev[:len(sha1)]) |
|
284 | 284 | |
|
285 | 285 | extra = commit.extra.copy() |
|
286 | 286 | |
|
287 | 287 | sourcename = self.repo.ui.config('convert', 'hg.sourcename') |
|
288 | 288 | if sourcename: |
|
289 | 289 | extra['convert_source'] = sourcename |
|
290 | 290 | |
|
291 | 291 | for label in ('source', 'transplant_source', 'rebase_source', |
|
292 | 292 | 'intermediate-source'): |
|
293 | 293 | node = extra.get(label) |
|
294 | 294 | |
|
295 | 295 | if node is None: |
|
296 | 296 | continue |
|
297 | 297 | |
|
298 | 298 | # Only transplant stores its reference in binary |
|
299 | 299 | if label == 'transplant_source': |
|
300 | 300 | node = nodemod.hex(node) |
|
301 | 301 | |
|
302 | 302 | newrev = revmap.get(node) |
|
303 | 303 | if newrev is not None: |
|
304 | 304 | if label == 'transplant_source': |
|
305 | 305 | newrev = nodemod.bin(newrev) |
|
306 | 306 | |
|
307 | 307 | extra[label] = newrev |
|
308 | 308 | |
|
309 | 309 | if self.branchnames and commit.branch: |
|
310 | 310 | extra['branch'] = commit.branch |
|
311 | 311 | if commit.rev and commit.saverev: |
|
312 | 312 | extra['convert_revision'] = commit.rev |
|
313 | 313 | |
|
314 | 314 | while parents: |
|
315 | 315 | p1 = p2 |
|
316 | 316 | p2 = parents.pop(0) |
|
317 | 317 | p1ctx = self.repo[p1] |
|
318 | 318 | p2ctx = None |
|
319 | 319 | if p2 != nodemod.nullid: |
|
320 | 320 | p2ctx = self.repo[p2] |
|
321 | 321 | fileset = set(files) |
|
322 | 322 | if full: |
|
323 | 323 | fileset.update(self.repo[p1]) |
|
324 | 324 | fileset.update(self.repo[p2]) |
|
325 | 325 | |
|
326 | 326 | if p2ctx: |
|
327 | 327 | p2files = set(cleanp2) |
|
328 | 328 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
329 | 329 | p2files.add(file) |
|
330 | 330 | fileset.add(file) |
|
331 | 331 | |
|
332 | 332 | ctx = context.memctx(self.repo, (p1, p2), text, fileset, |
|
333 | 333 | getfilectx, commit.author, commit.date, extra) |
|
334 | 334 | |
|
335 | 335 | # We won't know if the conversion changes the node until after the |
|
336 | 336 | # commit, so copy the source's phase for now. |
|
337 | 337 | self.repo.ui.setconfig('phases', 'new-commit', |
|
338 | 338 | phases.phasenames[commit.phase], 'convert') |
|
339 | 339 | |
|
340 | 340 | with self.repo.transaction("convert") as tr: |
|
341 | 341 | node = nodemod.hex(self.repo.commitctx(ctx)) |
|
342 | 342 | |
|
343 | 343 | # If the node value has changed, but the phase is lower than |
|
344 | 344 | # draft, set it back to draft since it hasn't been exposed |
|
345 | 345 | # anywhere. |
|
346 | 346 | if commit.rev != node: |
|
347 | 347 | ctx = self.repo[node] |
|
348 | 348 | if ctx.phase() < phases.draft: |
|
349 | 349 | phases.registernew(self.repo, tr, phases.draft, |
|
350 | 350 | [ctx.node()]) |
|
351 | 351 | |
|
352 | 352 | text = "(octopus merge fixup)\n" |
|
353 | 353 | p2 = node |
|
354 | 354 | |
|
355 | 355 | if self.filemapmode and nparents == 1: |
|
356 | 356 | man = self.repo.manifestlog._revlog |
|
357 | 357 | mnode = self.repo.changelog.read(nodemod.bin(p2))[0] |
|
358 | 358 | closed = 'close' in commit.extra |
|
359 | 359 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
360 | 360 | self.ui.status(_("filtering out empty revision\n")) |
|
361 | 361 | self.repo.rollback(force=True) |
|
362 | 362 | return parent |
|
363 | 363 | return p2 |
|
364 | 364 | |
|
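putcommit() linearizes octopus merges: an N-parent source commit becomes a chain of N-1 two-parent Mercurial commits. A sketch (hypothetical nodes A, B, C):

    # source commit M has parents (A, B, C)
    #   m1 = commitctx(parents=(A, B),  desc=original message)
    #   m2 = commitctx(parents=(m1, C), desc='(octopus merge fixup)\n')
    # the node returned to the caller is the final link (m2)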
365 | 365 | def puttags(self, tags): |
|
366 | 366 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
367 | 367 | tagparent = tagparent or nodemod.nullid |
|
368 | 368 | |
|
369 | 369 | oldlines = set() |
|
370 | 370 | for branch, heads in self.repo.branchmap().iteritems(): |
|
371 | 371 | for h in heads: |
|
372 | 372 | if '.hgtags' in self.repo[h]: |
|
373 | 373 | oldlines.update( |
|
374 | 374 | set(self.repo[h]['.hgtags'].data().splitlines(True))) |
|
375 | 375 | oldlines = sorted(list(oldlines)) |
|
376 | 376 | |
|
377 | 377 | newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
378 | 378 | if newlines == oldlines: |
|
379 | 379 | return None, None |
|
380 | 380 | |
|
381 | 381 | # if the old and new tags match, then there is nothing to update |
|
382 | 382 | oldtags = set() |
|
383 | 383 | newtags = set() |
|
384 | 384 | for line in oldlines: |
|
385 | 385 | s = line.strip().split(' ', 1) |
|
386 | 386 | if len(s) != 2: |
|
387 | 387 | continue |
|
388 | 388 | oldtags.add(s[1]) |
|
389 | 389 | for line in newlines: |
|
390 | 390 | s = line.strip().split(' ', 1) |
|
391 | 391 | if len(s) != 2: |
|
392 | 392 | continue |
|
393 | 393 | if s[1] not in oldtags: |
|
394 | 394 | newtags.add(s[1].strip()) |
|
395 | 395 | |
|
396 | 396 | if not newtags: |
|
397 | 397 | return None, None |
|
398 | 398 | |
|
399 | 399 | data = "".join(newlines) |
|
400 | 400 | def getfilectx(repo, memctx, f): |
|
401 | 401 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
402 | 402 | |
|
403 | 403 | self.ui.status(_("updating tags\n")) |
|
404 | 404 | date = "%d 0" % int(time.mktime(time.gmtime())) |
|
405 | 405 | extra = {'branch': self.tagsbranch} |
|
406 | 406 | ctx = context.memctx(self.repo, (tagparent, None), "update tags", |
|
407 | 407 | [".hgtags"], getfilectx, "convert-repo", date, |
|
408 | 408 | extra) |
|
409 | 409 | node = self.repo.commitctx(ctx) |
|
410 | 410 | return nodemod.hex(node), nodemod.hex(tagparent) |
|
411 | 411 | |
|
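puttags() stamps the tag commit with Mercurial's "unixtime offset" date string; a sketch of the value built above (hypothetical clock reading):

    # int(time.mktime(time.gmtime())) -> e.g. 1514764800
    # date == '1514764800 0'  (offset 0: recorded as UTC)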
412 | 412 | def setfilemapmode(self, active): |
|
413 | 413 | self.filemapmode = active |
|
414 | 414 | |
|
415 | 415 | def putbookmarks(self, updatedbookmark): |
|
416 | 416 | if not len(updatedbookmark): |
|
417 | 417 | return |
|
418 | 418 | wlock = lock = tr = None |
|
419 | 419 | try: |
|
420 | 420 | wlock = self.repo.wlock() |
|
421 | 421 | lock = self.repo.lock() |
|
422 | 422 | tr = self.repo.transaction('bookmark') |
|
423 | 423 | self.ui.status(_("updating bookmarks\n")) |
|
424 | 424 | destmarks = self.repo._bookmarks |
|
425 | 425 | changes = [(bookmark, nodemod.bin(updatedbookmark[bookmark])) |
|
426 | 426 | for bookmark in updatedbookmark] |
|
427 | 427 | destmarks.applychanges(self.repo, tr, changes) |
|
428 | 428 | tr.close() |
|
429 | 429 | finally: |
|
430 | 430 | lockmod.release(lock, wlock, tr) |
|
431 | 431 | |
|
432 | 432 | def hascommitfrommap(self, rev): |
|
433 | 433 | # the exact semantics of clonebranches is unclear so we can't say no |
|
434 | 434 | return rev in self.repo or self.clonebranches |
|
435 | 435 | |
|
436 | 436 | def hascommitforsplicemap(self, rev): |
|
437 | 437 | if rev not in self.repo and self.clonebranches: |
|
438 | 438 | raise error.Abort(_('revision %s not found in destination ' |
|
439 | 439 | 'repository (lookups with clonebranches=true ' |
|
440 | 440 | 'are not implemented)') % rev) |
|
441 | 441 | return rev in self.repo |
|
442 | 442 | |
|
443 | 443 | class mercurial_source(common.converter_source): |
|
444 | 444 | def __init__(self, ui, repotype, path, revs=None): |
|
445 | 445 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
446 | 446 | self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors') |
|
447 | 447 | self.ignored = set() |
|
448 | 448 | self.saverev = ui.configbool('convert', 'hg.saverev') |
|
449 | 449 | try: |
|
450 | 450 | self.repo = hg.repository(self.ui, path) |
|
451 | 451 | # try to provoke an exception if this isn't really a hg |
|
452 | 452 | # repo, but some other bogus compatible-looking url |
|
453 | 453 | if not self.repo.local(): |
|
454 | 454 | raise error.RepoError |
|
455 | 455 | except error.RepoError: |
|
456 | 456 | ui.traceback() |
|
457 | 457 | raise NoRepo(_("%s is not a local Mercurial repository") % path) |
|
458 | 458 | self.lastrev = None |
|
459 | 459 | self.lastctx = None |
|
460 | 460 | self._changescache = None, None |
|
461 | 461 | self.convertfp = None |
|
462 | 462 | # Restrict converted revisions to startrev descendants |
|
463 | 463 | startnode = ui.config('convert', 'hg.startrev') |
|
464 | 464 | hgrevs = ui.config('convert', 'hg.revs') |
|
465 | 465 | if hgrevs is None: |
|
466 | 466 | if startnode is not None: |
|
467 | 467 | try: |
|
468 | 468 | startnode = self.repo.lookup(startnode) |
|
469 | 469 | except error.RepoError: |
|
470 | 470 | raise error.Abort(_('%s is not a valid start revision') |
|
471 | 471 | % startnode) |
|
472 | 472 | startrev = self.repo.changelog.rev(startnode) |
|
473 | 473 | children = {startnode: 1} |
|
474 | 474 | for r in self.repo.changelog.descendants([startrev]): |
|
475 | 475 | children[self.repo.changelog.node(r)] = 1 |
|
476 | 476 | self.keep = children.__contains__ |
|
477 | 477 | else: |
|
478 | 478 | self.keep = util.always |
|
479 | 479 | if revs: |
|
480 | 480 | self._heads = [self.repo.lookup(r) for r in revs] |
|
481 | 481 | else: |
|
482 | 482 | self._heads = self.repo.heads() |
|
483 | 483 | else: |
|
484 | 484 | if revs or startnode is not None: |
|
485 | 485 | raise error.Abort(_('hg.revs cannot be combined with ' |
|
486 | 486 | 'hg.startrev or --rev')) |
|
487 | 487 | nodes = set() |
|
488 | 488 | parents = set() |
|
489 | 489 | for r in scmutil.revrange(self.repo, [hgrevs]): |
|
490 | 490 | ctx = self.repo[r] |
|
491 | 491 | nodes.add(ctx.node()) |
|
492 | 492 | parents.update(p.node() for p in ctx.parents()) |
|
493 | 493 | self.keep = nodes.__contains__ |
|
494 | 494 | self._heads = nodes - parents |
|
495 | 495 | |
|
496 | 496 | def _changectx(self, rev): |
|
497 | 497 | if self.lastrev != rev: |
|
498 | 498 | self.lastctx = self.repo[rev] |
|
499 | 499 | self.lastrev = rev |
|
500 | 500 | return self.lastctx |
|
501 | 501 | |
|
502 | 502 | def _parents(self, ctx): |
|
503 | 503 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
504 | 504 | |
|
505 | 505 | def getheads(self): |
|
506 | 506 | return [nodemod.hex(h) for h in self._heads if self.keep(h)] |
|
507 | 507 | |
|
508 | 508 | def getfile(self, name, rev): |
|
509 | 509 | try: |
|
510 | 510 | fctx = self._changectx(rev)[name] |
|
511 | 511 | return fctx.data(), fctx.flags() |
|
512 | 512 | except error.LookupError: |
|
513 | 513 | return None, None |
|
514 | 514 | |
|
515 | 515 | def _changedfiles(self, ctx1, ctx2): |
|
516 | 516 | ma, r = [], [] |
|
517 | 517 | maappend = ma.append |
|
518 | 518 | rappend = r.append |
|
519 | 519 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
520 | 520 | for f, ((node1, flag1), (node2, flag2)) in d.iteritems(): |
|
521 | 521 | if node2 is None: |
|
522 | 522 | rappend(f) |
|
523 | 523 | else: |
|
524 | 524 | maappend(f) |
|
525 | 525 | return ma, r |
|
526 | 526 | |
|
527 | 527 | def getchanges(self, rev, full): |
|
528 | 528 | ctx = self._changectx(rev) |
|
529 | 529 | parents = self._parents(ctx) |
|
530 | 530 | if full or not parents: |
|
531 | 531 | files = copyfiles = ctx.manifest() |
|
532 | 532 | if parents: |
|
533 | 533 | if self._changescache[0] == rev: |
|
534 | 534 | ma, r = self._changescache[1] |
|
535 | 535 | else: |
|
536 | 536 | ma, r = self._changedfiles(parents[0], ctx) |
|
537 | 537 | if not full: |
|
538 | 538 | files = ma + r |
|
539 | 539 | copyfiles = ma |
|
540 | 540 | # _getcopies() is also run for roots and before filtering so missing |
|
541 | 541 | # revlogs are detected early |
|
542 | 542 | copies = self._getcopies(ctx, parents, copyfiles) |
|
543 | 543 | cleanp2 = set() |
|
544 | 544 | if len(parents) == 2: |
|
545 | 545 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
546 | 546 | for f, value in d.iteritems(): |
|
547 | 547 | if value is None: |
|
548 | 548 | cleanp2.add(f) |
|
549 | 549 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
550 | 550 | changes.sort() |
|
551 | 551 | return changes, copies, cleanp2 |
|
552 | 552 | |
|
553 | 553 | def _getcopies(self, ctx, parents, files): |
|
554 | 554 | copies = {} |
|
555 | 555 | for name in files: |
|
556 | 556 | if name in self.ignored: |
|
557 | 557 | continue |
|
558 | 558 | try: |
|
559 | 559 | copysource, _copynode = ctx.filectx(name).renamed() |
|
560 | 560 | if copysource in self.ignored: |
|
561 | 561 | continue |
|
562 | 562 | # Ignore copy sources not in parent revisions |
|
563 | 563 | if not any(copysource in p for p in parents): |
|
564 | 564 | continue |
|
565 | 565 | copies[name] = copysource |
|
566 | 566 | except TypeError: |
|
567 | 567 | pass |
|
568 | 568 | except error.LookupError as e: |
|
569 | 569 | if not self.ignoreerrors: |
|
570 | 570 | raise |
|
571 | 571 | self.ignored.add(name) |
|
572 | 572 | self.ui.warn(_('ignoring: %s\n') % e) |
|
573 | 573 | return copies |
|
574 | 574 | |
|
575 | 575 | def getcommit(self, rev): |
|
576 | 576 | ctx = self._changectx(rev) |
|
577 | 577 | _parents = self._parents(ctx) |
|
578 | 578 | parents = [p.hex() for p in _parents] |
|
579 | 579 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
580 | 580 | crev = rev |
|
581 | 581 | |
|
582 | 582 | return common.commit(author=ctx.user(), |
|
583 | 583 | date=dateutil.datestr(ctx.date(), |
|
584 | 584 | '%Y-%m-%d %H:%M:%S %1%2'), |
|
585 | 585 | desc=ctx.description(), |
|
586 | 586 | rev=crev, |
|
587 | 587 | parents=parents, |
|
588 | 588 | optparents=optparents, |
|
589 | 589 | branch=ctx.branch(), |
|
590 | 590 | extra=ctx.extra(), |
|
591 | 591 | sortkey=ctx.rev(), |
|
592 | 592 | saverev=self.saverev, |
|
593 | 593 | phase=ctx.phase()) |
|
594 | 594 | |
|
595 | 595 | def gettags(self): |
|
596 | 596 | # This will get written to .hgtags, filter non global tags out. |
|
597 | 597 | tags = [t for t in self.repo.tagslist() |
|
598 | 598 | if self.repo.tagtype(t[0]) == 'global'] |
|
599 | 599 | return dict([(name, nodemod.hex(node)) for name, node in tags |
|
600 | 600 | if self.keep(node)]) |
|
601 | 601 | |
|
602 | 602 | def getchangedfiles(self, rev, i): |
|
603 | 603 | ctx = self._changectx(rev) |
|
604 | 604 | parents = self._parents(ctx) |
|
605 | 605 | if not parents and i is None: |
|
606 | 606 | i = 0 |
|
607 | 607 | ma, r = ctx.manifest().keys(), [] |
|
608 | 608 | else: |
|
609 | 609 | i = i or 0 |
|
610 | 610 | ma, r = self._changedfiles(parents[i], ctx) |
|
611 | 611 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
612 | 612 | |
|
613 | 613 | if i == 0: |
|
614 | 614 | self._changescache = (rev, (ma, r)) |
|
615 | 615 | |
|
616 | 616 | return ma + r |
|
617 | 617 | |
|
618 | 618 | def converted(self, rev, destrev): |
|
619 | 619 | if self.convertfp is None: |
|
620 | 620 | self.convertfp = open(self.repo.vfs.join('shamap'), 'ab') |
|
621 | 621 | self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev))) |
|
622 | 622 | self.convertfp.flush() |
|
623 | 623 | |
|
624 | 624 | def before(self): |
|
625 | 625 | self.ui.debug('run hg source pre-conversion action\n') |
|
626 | 626 | |
|
627 | 627 | def after(self): |
|
628 | 628 | self.ui.debug('run hg source post-conversion action\n') |
|
629 | 629 | |
|
630 | 630 | def hasnativeorder(self): |
|
631 | 631 | return True |
|
632 | 632 | |
|
633 | 633 | def hasnativeclose(self): |
|
634 | 634 | return True |
|
635 | 635 | |
|
636 | 636 | def lookuprev(self, rev): |
|
637 | 637 | try: |
|
638 | 638 | return nodemod.hex(self.repo.lookup(rev)) |
|
639 | 639 | except (error.RepoError, error.LookupError): |
|
640 | 640 | return None |
|
641 | 641 | |
|
642 | 642 | def getbookmarks(self): |
|
643 | 643 | return bookmarks.listbookmarks(self.repo) |
|
644 | 644 | |
|
645 | 645 | def checkrevformat(self, revstr, mapname='splicemap'): |
|
646 | 646 | """ Mercurial, revision string is a 40 byte hex """ |
|
647 | 647 | self.checkhexformat(revstr, mapname) |
@@ -1,1186 +1,1186 @@ | |||
|
1 | 1 | # Infinite push |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """ store some pushes in a remote blob store on the server (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | [infinitepush] |
|
10 | 10 | # Server-side and client-side option. Pattern of the infinitepush bookmark |
|
11 | 11 | branchpattern = PATTERN |
|
12 | 12 | |
|
13 | 13 | # Server or client |
|
14 | 14 | server = False |
|
15 | 15 | |
|
16 | 16 | # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set |
|
17 | 17 | indextype = disk |
|
18 | 18 | |
|
19 | 19 | # Server-side option. Used only if indextype=sql. |
|
20 | 20 | # Format: 'IP:PORT:DB_NAME:USER:PASSWORD' |
|
21 | 21 | sqlhost = IP:PORT:DB_NAME:USER:PASSWORD |
|
22 | 22 | |
|
23 | 23 | # Server-side option. Used only if indextype=disk. |
|
24 | 24 | # Filesystem path to the index store |
|
25 | 25 | indexpath = PATH |
|
26 | 26 | |
|
27 | 27 | # Server-side option. Possible values: 'disk' or 'external' |
|
28 | 28 | # Fails if not set |
|
29 | 29 | storetype = disk |
|
30 | 30 | |
|
31 | 31 | # Server-side option. |
|
32 | 32 | # Path to the binary that will save bundle to the bundlestore |
|
33 | 33 | # Formatted cmd line will be passed to it (see `put_args`) |
|
34 | 34 | put_binary = put |
|
35 | 35 | |
|
36 | 36 | # Server-side option. Used only if storetype=external. |
|
37 | 37 | # Format cmd-line string for put binary. Placeholder: {filename} |
|
38 | 38 | put_args = {filename} |
|
39 | 39 | |
|
40 | 40 | # Server-side option. |
|
41 | 41 | # Path to the binary that get bundle from the bundlestore. |
|
42 | 42 | # Formatted cmd line will be passed to it (see `get_args`) |
|
43 | 43 | get_binary = get |
|
44 | 44 | |
|
45 | 45 | # Server-side option. Used only if storetype=external. |
|
46 | 46 | # Format cmd-line string for get binary. Placeholders: {filename} {handle} |
|
47 | 47 | get_args = {filename} {handle} |
|
48 | 48 | |
|
49 | 49 | # Server-side option |
|
50 | 50 | logfile = FILE |
|
51 | 51 | |
|
52 | 52 | # Server-side option |
|
53 | 53 | loglevel = DEBUG |
|
54 | 54 | |
|
55 | 55 | # Server-side option. Used only if indextype=sql. |
|
56 | 56 | # Sets mysql wait_timeout option. |
|
57 | 57 | waittimeout = 300 |
|
58 | 58 | |
|
59 | 59 | # Server-side option. Used only if indextype=sql. |
|
60 | 60 | # Sets mysql innodb_lock_wait_timeout option. |
|
61 | 61 | locktimeout = 120 |
|
62 | 62 | |
|
63 | 63 | # Server-side option. Used only if indextype=sql. |
|
64 | 64 | # Name of the repository |
|
65 | 65 | reponame = '' |
|
66 | 66 | |
|
67 | 67 | # Client-side option. Used by --list-remote option. List of remote scratch |
|
68 | 68 | # patterns to list if no patterns are specified. |
|
69 | 69 | defaultremotepatterns = ['*'] |
|
70 | 70 | |
|
71 | 71 | # Instructs infinitepush to forward all received bundle2 parts to the |
|
72 | 72 | # bundle for storage. Defaults to False. |
|
73 | 73 | storeallparts = True |
|
74 | 74 | |
|
75 | 75 | # Routes each incoming push to the bundlestore. Defaults to False. |
|
76 | 76 | pushtobundlestore = True |
|
77 | 77 | |
|
78 | 78 | [remotenames] |
|
79 | 79 | # Client-side option |
|
80 | 80 | # This option should be set only if remotenames extension is enabled. |
|
81 | 81 | # Whether remote bookmarks are tracked by remotenames extension. |
|
82 | 82 | bookmarks = True |
|
83 | 83 | """ |
|
84 | 84 | |
|
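A minimal server-side configuration assembled from the options documented above (a hedged sketch; the pattern and path are illustrative):

    [infinitepush]
    server = True
    branchpattern = re:scratch/.+
    indextype = disk
    indexpath = /var/hg/infinitepush/index
    storetype = disk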
85 | 85 | from __future__ import absolute_import |
|
86 | 86 | |
|
87 | 87 | import collections |
|
88 | 88 | import contextlib |
|
89 | 89 | import errno |
|
90 | 90 | import functools |
|
91 | 91 | import logging |
|
92 | 92 | import os |
|
93 | 93 | import random |
|
94 | 94 | import re |
|
95 | 95 | import socket |
|
96 | 96 | import subprocess |
|
97 | 97 | import tempfile |
|
98 | 98 | import time |
|
99 | 99 | |
|
100 | 100 | from mercurial.node import ( |
|
101 | 101 | bin, |
|
102 | 102 | hex, |
|
103 | 103 | ) |
|
104 | 104 | |
|
105 | 105 | from mercurial.i18n import _ |
|
106 | 106 | |
|
107 | 107 | from mercurial.utils import ( |
|
108 | 108 | procutil, |
|
109 | 109 | stringutil, |
|
110 | 110 | ) |
|
111 | 111 | |
|
112 | 112 | from mercurial import ( |
|
113 | 113 | bundle2, |
|
114 | 114 | changegroup, |
|
115 | 115 | commands, |
|
116 | 116 | discovery, |
|
117 | 117 | encoding, |
|
118 | 118 | error, |
|
119 | 119 | exchange, |
|
120 | 120 | extensions, |
|
121 | 121 | hg, |
|
122 | 122 | localrepo, |
|
123 | 123 | peer, |
|
124 | 124 | phases, |
|
125 | 125 | pushkey, |
|
126 | 126 | pycompat, |
|
127 | 127 | registrar, |
|
128 | 128 | util, |
|
129 | 129 | wireproto, |
|
130 | 130 | ) |
|
131 | 131 | |
|
132 | 132 | from . import ( |
|
133 | 133 | bundleparts, |
|
134 | 134 | common, |
|
135 | 135 | ) |
|
136 | 136 | |
|
137 | 137 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
138 | 138 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
139 | 139 | # be specifying the version(s) of Mercurial they are tested with, or |
|
140 | 140 | # leave the attribute unspecified. |
|
141 | 141 | testedwith = 'ships-with-hg-core' |
|
142 | 142 | |
|
143 | 143 | configtable = {} |
|
144 | 144 | configitem = registrar.configitem(configtable) |
|
145 | 145 | |
|
146 | 146 | configitem('infinitepush', 'server', |
|
147 | 147 | default=False, |
|
148 | 148 | ) |
|
149 | 149 | configitem('infinitepush', 'storetype', |
|
150 | 150 | default='', |
|
151 | 151 | ) |
|
152 | 152 | configitem('infinitepush', 'indextype', |
|
153 | 153 | default='', |
|
154 | 154 | ) |
|
155 | 155 | configitem('infinitepush', 'indexpath', |
|
156 | 156 | default='', |
|
157 | 157 | ) |
|
158 | 158 | configitem('infinitepush', 'storeallparts', |
|
159 | 159 | default=False, |
|
160 | 160 | ) |
|
161 | 161 | configitem('infinitepush', 'reponame', |
|
162 | 162 | default='', |
|
163 | 163 | ) |
|
164 | 164 | configitem('scratchbranch', 'storepath', |
|
165 | 165 | default='', |
|
166 | 166 | ) |
|
167 | 167 | configitem('infinitepush', 'branchpattern', |
|
168 | 168 | default='', |
|
169 | 169 | ) |
|
170 | 170 | configitem('infinitepush', 'pushtobundlestore', |
|
171 | 171 | default=False, |
|
172 | 172 | ) |
|
173 | 173 | configitem('experimental', 'server-bundlestore-bookmark', |
|
174 | 174 | default='', |
|
175 | 175 | ) |
|
176 | 176 | configitem('experimental', 'infinitepush-scratchpush', |
|
177 | 177 | default=False, |
|
178 | 178 | ) |
|
179 | 179 | |
|
180 | 180 | experimental = 'experimental' |
|
181 | 181 | configbookmark = 'server-bundlestore-bookmark' |
|
182 | 182 | configscratchpush = 'infinitepush-scratchpush' |
|
183 | 183 | |
|
184 | 184 | scratchbranchparttype = bundleparts.scratchbranchparttype |
|
185 | 185 | revsetpredicate = registrar.revsetpredicate() |
|
186 | 186 | templatekeyword = registrar.templatekeyword() |
|
187 | 187 | _scratchbranchmatcher = lambda x: False |
|
188 | 188 | _maybehash = re.compile(r'^[a-f0-9]+$').search |
|
189 | 189 | |
|
190 | 190 | def _buildexternalbundlestore(ui): |
|
191 | 191 | put_args = ui.configlist('infinitepush', 'put_args', []) |
|
192 | 192 | put_binary = ui.config('infinitepush', 'put_binary') |
|
193 | 193 | if not put_binary: |
|
194 | 194 | raise error.Abort('put binary is not specified') |
|
195 | 195 | get_args = ui.configlist('infinitepush', 'get_args', []) |
|
196 | 196 | get_binary = ui.config('infinitepush', 'get_binary') |
|
197 | 197 | if not get_binary: |
|
198 | 198 | raise error.Abort('get binary is not specified') |
|
199 | 199 | from . import store |
|
200 | 200 | return store.externalbundlestore(put_binary, put_args, get_binary, get_args) |
|
201 | 201 | |
|
202 | 202 | def _buildsqlindex(ui): |
|
203 | 203 | sqlhost = ui.config('infinitepush', 'sqlhost') |
|
204 | 204 | if not sqlhost: |
|
205 | 205 | raise error.Abort(_('please set infinitepush.sqlhost')) |
|
206 | 206 | host, port, db, user, password = sqlhost.split(':') |
|
207 | 207 | reponame = ui.config('infinitepush', 'reponame') |
|
208 | 208 | if not reponame: |
|
209 | 209 | raise error.Abort(_('please set infinitepush.reponame')) |
|
210 | 210 | |
|
211 | 211 | logfile = ui.config('infinitepush', 'logfile', '') |
|
212 | 212 | waittimeout = ui.configint('infinitepush', 'waittimeout', 300) |
|
213 | 213 | locktimeout = ui.configint('infinitepush', 'locktimeout', 120) |
|
214 | 214 | from . import sqlindexapi |
|
215 | 215 | return sqlindexapi.sqlindexapi( |
|
216 | 216 | reponame, host, port, db, user, password, |
|
217 | 217 | logfile, _getloglevel(ui), waittimeout=waittimeout, |
|
218 | 218 | locktimeout=locktimeout) |
|
219 | 219 | |
|
220 | 220 | def _getloglevel(ui): |
|
221 | 221 | loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG') |
|
222 | 222 | numeric_loglevel = getattr(logging, loglevel.upper(), None) |
|
223 | 223 | if not isinstance(numeric_loglevel, int): |
|
224 | 224 | raise error.Abort(_('invalid log level %s') % loglevel) |
|
225 | 225 | return numeric_loglevel |
|
226 | 226 | |
|
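_getloglevel() resolves the configured name against the stdlib logging module; a sketch:

    # 'DEBUG' (or 'debug') -> logging.DEBUG == 10
    # an unknown name -> getattr(logging, ...) is None -> Abort('invalid log level')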
227 | 227 | def _tryhoist(ui, remotebookmark): |
|
228 | 228 | '''returns a bookmark with the hoisted part removed |
|
229 | 229 | |
|
230 | 230 | The remotenames extension has a 'hoist' config that allows using remote |

231 | 231 | bookmarks without specifying the remote path. For example, 'hg update master' |
|
232 | 232 | works as well as 'hg update remote/master'. We want to allow the same in |
|
233 | 233 | infinitepush. |
|
234 | 234 | ''' |
|
235 | 235 | |
|
236 | 236 | if common.isremotebooksenabled(ui): |
|
237 | 237 | hoist = ui.config('remotenames', 'hoistedpeer') + '/' |
|
238 | 238 | if remotebookmark.startswith(hoist): |
|
239 | 239 | return remotebookmark[len(hoist):] |
|
240 | 240 | return remotebookmark |
|
241 | 241 | |
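A standalone sketch of the hoisting logic in `_tryhoist`, decoupled from the ui object ('remote' here is only an assumed value of `remotenames.hoistedpeer`):

    def tryhoist(bookmark, hoistedpeer='remote'):
        # Strip the hoisted peer prefix, e.g. 'remote/master' -> 'master'.
        hoist = hoistedpeer + '/'
        if bookmark.startswith(hoist):
            return bookmark[len(hoist):]
        return bookmark

    assert tryhoist('remote/master') == 'master'
    assert tryhoist('master') == 'master'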
|
242 | 242 | class bundlestore(object): |
|
243 | 243 | def __init__(self, repo): |
|
244 | 244 | self._repo = repo |
|
245 | 245 | storetype = self._repo.ui.config('infinitepush', 'storetype') |
|
246 | 246 | if storetype == 'disk': |
|
247 | 247 | from . import store |
|
248 | 248 | self.store = store.filebundlestore(self._repo.ui, self._repo) |
|
249 | 249 | elif storetype == 'external': |
|
250 | 250 | self.store = _buildexternalbundlestore(self._repo.ui) |
|
251 | 251 | else: |
|
252 | 252 | raise error.Abort( |
|
253 | 253 | _('unknown infinitepush store type specified %s') % storetype) |
|
254 | 254 | |
|
255 | 255 | indextype = self._repo.ui.config('infinitepush', 'indextype') |
|
256 | 256 | if indextype == 'disk': |
|
257 | 257 | from . import fileindexapi |
|
258 | 258 | self.index = fileindexapi.fileindexapi(self._repo) |
|
259 | 259 | elif indextype == 'sql': |
|
260 | 260 | self.index = _buildsqlindex(self._repo.ui) |
|
261 | 261 | else: |
|
262 | 262 | raise error.Abort( |
|
263 | 263 | _('unknown infinitepush index type specified %s') % indextype) |
|
264 | 264 | |
|
265 | 265 | def _isserver(ui): |
|
266 | 266 | return ui.configbool('infinitepush', 'server') |
|
267 | 267 | |
|
268 | 268 | def reposetup(ui, repo): |
|
269 | 269 | if _isserver(ui) and repo.local(): |
|
270 | 270 | repo.bundlestore = bundlestore(repo) |
|
271 | 271 | |
|
272 | 272 | def extsetup(ui): |
|
273 | 273 | commonsetup(ui) |
|
274 | 274 | if _isserver(ui): |
|
275 | 275 | serverextsetup(ui) |
|
276 | 276 | else: |
|
277 | 277 | clientextsetup(ui) |
|
278 | 278 | |
|
279 | 279 | def commonsetup(ui): |
|
280 | 280 | wireproto.commands['listkeyspatterns'] = ( |
|
281 | 281 | wireprotolistkeyspatterns, 'namespace patterns') |
|
282 | 282 | scratchbranchpat = ui.config('infinitepush', 'branchpattern') |
|
283 | 283 | if scratchbranchpat: |
|
284 | 284 | global _scratchbranchmatcher |
|
285 | 285 | kind, pat, _scratchbranchmatcher = \ |
|
286 | 286 | stringutil.stringmatcher(scratchbranchpat) |
|
287 | 287 | |
|
288 | 288 | def serverextsetup(ui): |
|
289 | 289 | origpushkeyhandler = bundle2.parthandlermapping['pushkey'] |
|
290 | 290 | |
|
291 | 291 | def newpushkeyhandler(*args, **kwargs): |
|
292 | 292 | bundle2pushkey(origpushkeyhandler, *args, **kwargs) |
|
293 | 293 | newpushkeyhandler.params = origpushkeyhandler.params |
|
294 | 294 | bundle2.parthandlermapping['pushkey'] = newpushkeyhandler |
|
295 | 295 | |
|
296 | 296 | orighandlephasehandler = bundle2.parthandlermapping['phase-heads'] |
|
297 | 297 | newphaseheadshandler = lambda *args, **kwargs: \ |
|
298 | 298 | bundle2handlephases(orighandlephasehandler, *args, **kwargs) |
|
299 | 299 | newphaseheadshandler.params = orighandlephasehandler.params |
|
300 | 300 | bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler |
|
301 | 301 | |
|
302 | 302 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', |
|
303 | 303 | localrepolistkeys) |
|
304 | 304 | wireproto.commands['lookup'] = ( |
|
305 | 305 | _lookupwrap(wireproto.commands['lookup'][0]), 'key') |
|
306 | 306 | extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks) |
|
307 | 307 | |
|
308 | 308 | extensions.wrapfunction(bundle2, 'processparts', processparts) |
|
309 | 309 | |
|
310 | 310 | def clientextsetup(ui): |
|
311 | 311 | entry = extensions.wrapcommand(commands.table, 'push', _push) |
|
312 | 312 | |
|
313 | 313 | entry[1].append( |
|
314 | 314 | ('', 'bundle-store', None, |
|
315 | 315 | _('force push to go to bundle store (EXPERIMENTAL)'))) |
|
316 | 316 | |
|
317 | 317 | extensions.wrapcommand(commands.table, 'pull', _pull) |
|
318 | 318 | |
|
319 | 319 | extensions.wrapfunction(discovery, 'checkheads', _checkheads) |
|
320 | 320 | |
|
321 | 321 | wireproto.wirepeer.listkeyspatterns = listkeyspatterns |
|
322 | 322 | |
|
323 | 323 | partorder = exchange.b2partsgenorder |
|
324 | 324 | index = partorder.index('changeset') |
|
325 | 325 | partorder.insert( |
|
326 | 326 | index, partorder.pop(partorder.index(scratchbranchparttype))) |
|
327 | 327 | |
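The part-order manipulation at the end of `clientextsetup` can be tried on a plain list; a sketch (names are illustrative):

    def movebefore(order, part, anchor):
        # Pop 'part' and re-insert it just before 'anchor', mirroring the
        # b2partsgenorder manipulation above; the anchor index is computed
        # before the pop, as in the original code.
        order.insert(order.index(anchor), order.pop(order.index(part)))
        return order

    assert movebefore(['x', 'changeset', 'scratch'], 'scratch', 'changeset') == \
        ['x', 'scratch', 'changeset']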
|
328 | 328 | def _checkheads(orig, pushop): |
|
329 | 329 | if pushop.ui.configbool(experimental, configscratchpush, False): |
|
330 | 330 | return |
|
331 | 331 | return orig(pushop) |
|
332 | 332 | |
|
333 | 333 | def wireprotolistkeyspatterns(repo, proto, namespace, patterns): |
|
334 | 334 | patterns = wireproto.decodelist(patterns) |
|
335 | 335 | d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems() |
|
336 | 336 | return pushkey.encodekeys(d) |
|
337 | 337 | |
|
338 | 338 | def localrepolistkeys(orig, self, namespace, patterns=None): |
|
339 | 339 | if namespace == 'bookmarks' and patterns: |
|
340 | 340 | index = self.bundlestore.index |
|
341 | 341 | results = {} |
|
342 | 342 | bookmarks = orig(self, namespace) |
|
343 | 343 | for pattern in patterns: |
|
344 | 344 | results.update(index.getbookmarks(pattern)) |
|
345 | 345 | if pattern.endswith('*'): |
|
346 | 346 | pattern = 're:^' + pattern[:-1] + '.*' |
|
347 | 347 | kind, pat, matcher = stringutil.stringmatcher(pattern) |
|
348 | 348 | for bookmark, node in bookmarks.iteritems(): |
|
349 | 349 | if matcher(bookmark): |
|
350 | 350 | results[bookmark] = node |
|
351 | 351 | return results |
|
352 | 352 | else: |
|
353 | 353 | return orig(self, namespace) |
|
354 | 354 | |
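`localrepolistkeys` turns a trailing-`*` glob into an anchored regular expression for `stringutil.stringmatcher`. A self-contained sketch of that conversion, covering only the literal and `re:` cases that matter here:

    import re

    def patterntomatcher(pattern):
        # A trailing '*' glob becomes an anchored regex, as above.
        if pattern.endswith('*'):
            pattern = 're:^' + pattern[:-1] + '.*'
        if pattern.startswith('re:'):
            return re.compile(pattern[3:]).match
        return lambda s: s == pattern

    matcher = patterntomatcher('infinitepush/*')
    assert matcher('infinitepush/myfeature')
    assert not matcher('other/branch')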
|
355 | 355 | @peer.batchable |
|
356 | 356 | def listkeyspatterns(self, namespace, patterns): |
|
357 | 357 | if not self.capable('pushkey'): |
|
358 | 358 | yield {}, None |
|
359 | 359 | f = peer.future() |
|
360 | 360 | self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' % |
|
361 | 361 | (namespace, patterns)) |
|
362 | 362 | yield { |
|
363 | 363 | 'namespace': encoding.fromlocal(namespace), |
|
364 | 364 | 'patterns': wireproto.encodelist(patterns) |
|
365 | 365 | }, f |
|
366 | 366 | d = f.value |
|
367 | 367 | self.ui.debug('received listkey for "%s": %i bytes\n' |
|
368 | 368 | % (namespace, len(d))) |
|
369 | 369 | yield pushkey.decodekeys(d) |
|
370 | 370 | |
|
371 | 371 | def _readbundlerevs(bundlerepo): |
|
372 | 372 | return list(bundlerepo.revs('bundle()')) |
|
373 | 373 | |
|
374 | 374 | def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): |
|
375 | 375 | '''Tells remotefilelog to include all changed files in the changegroup
|
376 | 376 | |
|
377 | 377 | By default remotefilelog doesn't include file content in the changegroup,

378 | 378 | but we need to include it if we are fetching from the bundlestore.
|
379 | 379 | ''' |
|
380 | 380 | changedfiles = set() |
|
381 | 381 | cl = bundlerepo.changelog |
|
382 | 382 | for r in bundlerevs: |
|
383 | 383 | # [3] means changed files |
|
384 | 384 | changedfiles.update(cl.read(r)[3]) |
|
385 | 385 | if not changedfiles: |
|
386 | 386 | return bundlecaps |
|
387 | 387 | |
|
388 | 388 | changedfiles = '\0'.join(changedfiles) |
|
389 | 389 | newcaps = [] |
|
390 | 390 | appended = False |
|
391 | 391 | for cap in (bundlecaps or []): |
|
392 | 392 | if cap.startswith('excludepattern='): |
|
393 | 393 | newcaps.append('\0'.join((cap, changedfiles))) |
|
394 | 394 | appended = True |
|
395 | 395 | else: |
|
396 | 396 | newcaps.append(cap) |
|
397 | 397 | if not appended: |
|
398 | 398 | # No excludepattern cap found. Just append it
|
399 | 399 | newcaps.append('excludepattern=' + changedfiles) |
|
400 | 400 | |
|
401 | 401 | return newcaps |
|
402 | 402 | |
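A minimal standalone version of the capability rewriting above; the NUL byte is the separator the `excludepattern` capability already uses (the `sorted` call is added here only to make the check deterministic):

    def addexcludepattern(bundlecaps, changedfiles):
        # Append the file list to an existing excludepattern capability,
        # or add a new one if none is present.
        changed = '\0'.join(sorted(changedfiles))
        newcaps = []
        appended = False
        for cap in (bundlecaps or []):
            if cap.startswith('excludepattern='):
                newcaps.append('\0'.join((cap, changed)))
                appended = True
            else:
                newcaps.append(cap)
        if not appended:
            newcaps.append('excludepattern=' + changed)
        return newcaps

    assert addexcludepattern(None, {'a.txt'}) == ['excludepattern=a.txt']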
|
403 | 403 | def _rebundle(bundlerepo, bundleroots, unknownhead): |
|
404 | 404 | ''' |
|
405 | 405 | A bundle may include more revisions than the user requested. For example,

406 | 406 | the user may ask for one revision while the bundle also contains its descendants.

407 | 407 | This function filters out all revisions that the user did not request.
|
408 | 408 | ''' |
|
409 | 409 | parts = [] |
|
410 | 410 | |
|
411 | 411 | version = '02' |
|
412 | 412 | outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots, |
|
413 | 413 | missingheads=[unknownhead]) |
|
414 | 414 | cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull') |
|
415 | 415 | cgstream = util.chunkbuffer(cgstream).read() |
|
416 | 416 | cgpart = bundle2.bundlepart('changegroup', data=cgstream) |
|
417 | 417 | cgpart.addparam('version', version) |
|
418 | 418 | parts.append(cgpart) |
|
419 | 419 | |
|
420 | 420 | return parts |
|
421 | 421 | |
|
422 | 422 | def _getbundleroots(oldrepo, bundlerepo, bundlerevs): |
|
423 | 423 | cl = bundlerepo.changelog |
|
424 | 424 | bundleroots = [] |
|
425 | 425 | for rev in bundlerevs: |
|
426 | 426 | node = cl.node(rev) |
|
427 | 427 | parents = cl.parents(node) |
|
428 | 428 | for parent in parents: |
|
429 | 429 | # include all revs that exist in the main repo

430 | 430 | # to make sure that the bundle can be applied client-side
|
431 | 431 | if parent in oldrepo: |
|
432 | 432 | bundleroots.append(parent) |
|
433 | 433 | return bundleroots |
|
434 | 434 | |
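Stripped of changelog details, `_getbundleroots` collects those parents of bundle revisions that already exist in the main repository; a sketch with a plain dict-based graph (all names hypothetical):

    def getbundleroots(mainnodes, parentsof, bundlenodes):
        # Parents the client already has become the 'common heads'
        # against which the scratch bundle is regenerated.
        roots = []
        for node in bundlenodes:
            for parent in parentsof[node]:
                if parent in mainnodes:
                    roots.append(parent)
        return roots

    parentsof = {'b1': ['m1'], 'b2': ['b1']}
    assert getbundleroots({'m1'}, parentsof, ['b1', 'b2']) == ['m1']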
|
435 | 435 | def _needsrebundling(head, bundlerepo): |
|
436 | 436 | bundleheads = list(bundlerepo.revs('heads(bundle())')) |
|
437 | 437 | return not (len(bundleheads) == 1 and |
|
438 | 438 | bundlerepo[bundleheads[0]].node() == head) |
|
439 | 439 | |
|
440 | 440 | def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): |
|
441 | 441 | '''generates the bundle that will be sent to the user

442 | 442 |

443 | 443 | returns a list of bundle2 parts
|
444 | 444 | ''' |
|
445 | 445 | parts = [] |
|
446 | 446 | if not _needsrebundling(head, bundlerepo): |
|
447 | 447 | with util.posixfile(bundlefile, "rb") as f: |
|
448 | 448 | unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) |
|
449 | 449 | if isinstance(unbundler, changegroup.cg1unpacker): |
|
450 | 450 | part = bundle2.bundlepart('changegroup', |
|
451 | 451 | data=unbundler._stream.read()) |
|
452 | 452 | part.addparam('version', '01') |
|
453 | 453 | parts.append(part) |
|
454 | 454 | elif isinstance(unbundler, bundle2.unbundle20): |
|
455 | 455 | haschangegroup = False |
|
456 | 456 | for part in unbundler.iterparts(): |
|
457 | 457 | if part.type == 'changegroup': |
|
458 | 458 | haschangegroup = True |
|
459 | 459 | newpart = bundle2.bundlepart(part.type, data=part.read()) |
|
460 | 460 | for key, value in part.params.iteritems(): |
|
461 | 461 | newpart.addparam(key, value) |
|
462 | 462 | parts.append(newpart) |
|
463 | 463 | |
|
464 | 464 | if not haschangegroup: |
|
465 | 465 | raise error.Abort( |
|
466 | 466 | 'unexpected bundle without changegroup part, ' + |
|
467 | 467 | 'head: %s' % hex(head), |
|
468 | 468 | hint='report to administrator') |
|
469 | 469 | else: |
|
470 | 470 | raise error.Abort('unknown bundle type') |
|
471 | 471 | else: |
|
472 | 472 | parts = _rebundle(bundlerepo, bundleroots, head) |
|
473 | 473 | |
|
474 | 474 | return parts |
|
475 | 475 | |
|
476 | 476 | def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs): |
|
477 | 477 | heads = heads or [] |
|
478 | 478 | # newheads are parents of roots of scratch bundles that were requested |
|
479 | 479 | newphases = {} |
|
480 | 480 | scratchbundles = [] |
|
481 | 481 | newheads = [] |
|
482 | 482 | scratchheads = [] |
|
483 | 483 | nodestobundle = {} |
|
484 | 484 | allbundlestocleanup = [] |
|
485 | 485 | try: |
|
486 | 486 | for head in heads: |
|
487 | 487 | if head not in repo.changelog.nodemap: |
|
488 | 488 | if head not in nodestobundle: |
|
489 | 489 | newbundlefile = common.downloadbundle(repo, head) |
|
490 | 490 | bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile) |
|
491 | 491 | bundlerepo = hg.repository(repo.ui, bundlepath) |
|
492 | 492 | |
|
493 | 493 | allbundlestocleanup.append((bundlerepo, newbundlefile)) |
|
494 | 494 | bundlerevs = set(_readbundlerevs(bundlerepo)) |
|
495 | 495 | bundlecaps = _includefilelogstobundle( |
|
496 | 496 | bundlecaps, bundlerepo, bundlerevs, repo.ui) |
|
497 | 497 | cl = bundlerepo.changelog |
|
498 | 498 | bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs) |
|
499 | 499 | for rev in bundlerevs: |
|
500 | 500 | node = cl.node(rev) |
|
501 | 501 | newphases[hex(node)] = str(phases.draft) |
|
502 | 502 | nodestobundle[node] = (bundlerepo, bundleroots, |
|
503 | 503 | newbundlefile) |
|
504 | 504 | |
|
505 | 505 | scratchbundles.append( |
|
506 | 506 | _generateoutputparts(head, *nodestobundle[head])) |
|
507 | 507 | newheads.extend(bundleroots) |
|
508 | 508 | scratchheads.append(head) |
|
509 | 509 | finally: |
|
510 | 510 | for bundlerepo, bundlefile in allbundlestocleanup: |
|
511 | 511 | bundlerepo.close() |
|
512 | 512 | try: |
|
513 | 513 | os.unlink(bundlefile) |
|
514 | 514 | except (IOError, OSError): |
|
515 | 515 | # if we can't cleanup the file then just ignore the error, |
|
516 | 516 | # no need to fail |
|
517 | 517 | pass |
|
518 | 518 | |
|
519 | 519 | pullfrombundlestore = bool(scratchbundles) |
|
520 | 520 | wrappedchangegrouppart = False |
|
521 | 521 | wrappedlistkeys = False |
|
522 | 522 | oldchangegrouppart = exchange.getbundle2partsmapping['changegroup'] |
|
523 | 523 | try: |
|
524 | 524 | def _changegrouppart(bundler, *args, **kwargs): |
|
525 | 525 | # Order is important here. First add the non-scratch part

526 | 526 | # and only then the parts with scratch bundles, because the

527 | 527 | # non-scratch part contains the parents of the roots of the scratch bundles.
|
528 | 528 | result = oldchangegrouppart(bundler, *args, **kwargs) |
|
529 | 529 | for bundle in scratchbundles: |
|
530 | 530 | for part in bundle: |
|
531 | 531 | bundler.addpart(part) |
|
532 | 532 | return result |
|
533 | 533 | |
|
534 | 534 | exchange.getbundle2partsmapping['changegroup'] = _changegrouppart |
|
535 | 535 | wrappedchangegrouppart = True |
|
536 | 536 | |
|
537 | 537 | def _listkeys(orig, self, namespace): |
|
538 | 538 | origvalues = orig(self, namespace) |
|
539 | 539 | if namespace == 'phases' and pullfrombundlestore: |
|
540 | 540 | if origvalues.get('publishing') == 'True': |
|
541 | 541 | # Make repo non-publishing to preserve draft phase |
|
542 | 542 | del origvalues['publishing'] |
|
543 | 543 | origvalues.update(newphases) |
|
544 | 544 | return origvalues |
|
545 | 545 | |
|
546 | 546 | extensions.wrapfunction(localrepo.localrepository, 'listkeys', |
|
547 | 547 | _listkeys) |
|
548 | 548 | wrappedlistkeys = True |
|
549 | 549 | heads = list((set(newheads) | set(heads)) - set(scratchheads)) |
|
550 | 550 | result = orig(repo, source, heads=heads, |
|
551 | 551 | bundlecaps=bundlecaps, **kwargs) |
|
552 | 552 | finally: |
|
553 | 553 | if wrappedchangegrouppart: |
|
554 | 554 | exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart |
|
555 | 555 | if wrappedlistkeys: |
|
556 | 556 | extensions.unwrapfunction(localrepo.localrepository, 'listkeys', |
|
557 | 557 | _listkeys) |
|
558 | 558 | return result |
|
559 | 559 | |
|
560 | 560 | def _lookupwrap(orig): |
|
561 | 561 | def _lookup(repo, proto, key): |
|
562 | 562 | localkey = encoding.tolocal(key) |
|
563 | 563 | |
|
564 | 564 | if isinstance(localkey, str) and _scratchbranchmatcher(localkey): |
|
565 | 565 | scratchnode = repo.bundlestore.index.getnode(localkey) |
|
566 | 566 | if scratchnode: |
|
567 | 567 | return "%s %s\n" % (1, scratchnode) |
|
568 | 568 | else: |
|
569 | 569 | return "%s %s\n" % (0, 'scratch branch %s not found' % localkey) |
|
570 | 570 | else: |
|
571 | 571 | try: |
|
572 | 572 | r = hex(repo.lookup(localkey)) |
|
573 | 573 | return "%s %s\n" % (1, r) |
|
574 | 574 | except Exception as inst: |
|
575 | 575 | if repo.bundlestore.index.getbundle(localkey): |
|
576 | 576 | return "%s %s\n" % (1, localkey) |
|
577 | 577 | else: |
|
578 | 578 | r = str(inst) |
|
579 | 579 | return "%s %s\n" % (0, r) |
|
580 | 580 | return _lookup |
|
581 | 581 | |
|
582 | 582 | def _pull(orig, ui, repo, source="default", **opts): |
|
583 | 583 | opts = pycompat.byteskwargs(opts) |
|
584 | 584 | # Copy paste from `pull` command |
|
585 | 585 | source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) |
|
586 | 586 | |
|
587 | 587 | scratchbookmarks = {} |
|
588 | 588 | unfi = repo.unfiltered() |
|
589 | 589 | unknownnodes = [] |
|
590 | 590 | for rev in opts.get('rev', []): |
|
591 | 591 | if rev not in unfi: |
|
592 | 592 | unknownnodes.append(rev) |
|
593 | 593 | if opts.get('bookmark'): |
|
594 | 594 | bookmarks = [] |
|
595 | 595 | revs = opts.get('rev') or [] |
|
596 | 596 | for bookmark in opts.get('bookmark'): |
|
597 | 597 | if _scratchbranchmatcher(bookmark): |
|
598 | 598 | # rev is not known yet |
|
599 | 599 | # it will be fetched with listkeyspatterns next |
|
600 | 600 | scratchbookmarks[bookmark] = 'REVTOFETCH' |
|
601 | 601 | else: |
|
602 | 602 | bookmarks.append(bookmark) |
|
603 | 603 | |
|
604 | 604 | if scratchbookmarks: |
|
605 | 605 | other = hg.peer(repo, opts, source) |
|
606 | 606 | fetchedbookmarks = other.listkeyspatterns( |
|
607 | 607 | 'bookmarks', patterns=scratchbookmarks) |
|
608 | 608 | for bookmark in scratchbookmarks: |
|
609 | 609 | if bookmark not in fetchedbookmarks: |
|
610 | 610 | raise error.Abort('remote bookmark %s not found!' % |
|
611 | 611 | bookmark) |
|
612 | 612 | scratchbookmarks[bookmark] = fetchedbookmarks[bookmark] |
|
613 | 613 | revs.append(fetchedbookmarks[bookmark]) |
|
614 | 614 | opts['bookmark'] = bookmarks |
|
615 | 615 | opts['rev'] = revs |
|
616 | 616 | |
|
617 | 617 | if scratchbookmarks or unknownnodes: |
|
618 | 618 | # Set anyincoming to True |
|
619 | 619 | extensions.wrapfunction(discovery, 'findcommonincoming', |
|
620 | 620 | _findcommonincoming) |
|
621 | 621 | try: |
|
622 | 622 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
623 | 623 | # know about them. Let's save them before the pull and restore them after
|
624 | 624 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source) |
|
625 | 625 | result = orig(ui, repo, source, **pycompat.strkwargs(opts)) |
|
626 | 626 | # TODO(stash): race condition is possible |
|
627 | 627 | # if scratch bookmarks were updated right after orig.
|
628 | 628 | # But that's unlikely and shouldn't be harmful. |
|
629 | 629 | if common.isremotebooksenabled(ui): |
|
630 | 630 | remotescratchbookmarks.update(scratchbookmarks) |
|
631 | 631 | _saveremotebookmarks(repo, remotescratchbookmarks, source) |
|
632 | 632 | else: |
|
633 | 633 | _savelocalbookmarks(repo, scratchbookmarks) |
|
634 | 634 | return result |
|
635 | 635 | finally: |
|
636 | 636 | if scratchbookmarks: |
|
637 | 637 | extensions.unwrapfunction(discovery, 'findcommonincoming') |
|
638 | 638 | |
|
639 | 639 | def _readscratchremotebookmarks(ui, repo, other): |
|
640 | 640 | if common.isremotebooksenabled(ui): |
|
641 | 641 | remotenamesext = extensions.find('remotenames') |
|
642 | 642 | remotepath = remotenamesext.activepath(repo.ui, other) |
|
643 | 643 | result = {} |
|
644 | 644 | # Let's refresh remotenames to make sure we have it up to date |
|
645 | 645 | # It seems that `repo.names['remotebookmarks']` may return stale bookmarks,

646 | 646 | # and that results in deleting scratch bookmarks. Our best guess at how to

647 | 647 | # fix it is to use `clearnames()`
|
648 | 648 | repo._remotenames.clearnames() |
|
649 | 649 | for remotebookmark in repo.names['remotebookmarks'].listnames(repo): |
|
650 | 650 | path, bookname = remotenamesext.splitremotename(remotebookmark) |
|
651 | 651 | if path == remotepath and _scratchbranchmatcher(bookname): |
|
652 | 652 | nodes = repo.names['remotebookmarks'].nodes(repo, |
|
653 | 653 | remotebookmark) |
|
654 | 654 | if nodes: |
|
655 | 655 | result[bookname] = hex(nodes[0]) |
|
656 | 656 | return result |
|
657 | 657 | else: |
|
658 | 658 | return {} |
|
659 | 659 | |
|
660 | 660 | def _saveremotebookmarks(repo, newbookmarks, remote): |
|
661 | 661 | remotenamesext = extensions.find('remotenames') |
|
662 | 662 | remotepath = remotenamesext.activepath(repo.ui, remote) |
|
663 | 663 | branches = collections.defaultdict(list) |
|
664 | 664 | bookmarks = {} |
|
665 | 665 | remotenames = remotenamesext.readremotenames(repo) |
|
666 | 666 | for hexnode, nametype, remote, rname in remotenames: |
|
667 | 667 | if remote != remotepath: |
|
668 | 668 | continue |
|
669 | 669 | if nametype == 'bookmarks': |
|
670 | 670 | if rname in newbookmarks: |
|
671 | 671 | # This is possible if we have a normal bookmark that matches the

672 | 672 | # scratch branch pattern. In this case just use the current
|
673 | 673 | # bookmark node |
|
674 | 674 | del newbookmarks[rname] |
|
675 | 675 | bookmarks[rname] = hexnode |
|
676 | 676 | elif nametype == 'branches': |
|
677 | 677 | # saveremotenames expects 20 byte binary nodes for branches |
|
678 | 678 | branches[rname].append(bin(hexnode)) |
|
679 | 679 | |
|
680 | 680 | for bookmark, hexnode in newbookmarks.iteritems(): |
|
681 | 681 | bookmarks[bookmark] = hexnode |
|
682 | 682 | remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks) |
|
683 | 683 | |
|
684 | 684 | def _savelocalbookmarks(repo, bookmarks): |
|
685 | 685 | if not bookmarks: |
|
686 | 686 | return |
|
687 | 687 | with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr: |
|
688 | 688 | changes = [] |
|
689 | 689 | for scratchbook, node in bookmarks.iteritems(): |
|
690 | 690 | changectx = repo[node] |
|
691 | 691 | changes.append((scratchbook, changectx.node())) |
|
692 | 692 | repo._bookmarks.applychanges(repo, tr, changes) |
|
693 | 693 | |
|
694 | 694 | def _findcommonincoming(orig, *args, **kwargs): |
|
695 | 695 | common, inc, remoteheads = orig(*args, **kwargs) |
|
696 | 696 | return common, True, remoteheads |
|
697 | 697 | |
|
698 | 698 | def _push(orig, ui, repo, dest=None, *args, **opts): |
|
699 | 699 | |
|
700 | 700 | bookmark = opts.get(r'bookmark') |
|
701 | 701 | # we only support pushing one infinitepush bookmark at once |
|
702 | 702 | if len(bookmark) == 1: |
|
703 | 703 | bookmark = bookmark[0] |
|
704 | 704 | else: |
|
705 | 705 | bookmark = '' |
|
706 | 706 | |
|
707 | 707 | oldphasemove = None |
|
708 | 708 | overrides = {(experimental, configbookmark): bookmark} |
|
709 | 709 | |
|
710 | 710 | with ui.configoverride(overrides, 'infinitepush'): |
|
711 | 711 | scratchpush = opts.get('bundle_store') |
|
712 | 712 | if _scratchbranchmatcher(bookmark): |
|
713 | 713 | scratchpush = True |
|
714 | 714 | # bundle2 can be sent back after push (for example, bundle2 |
|
715 | 715 | # containing `pushkey` part to update bookmarks) |
|
716 | 716 | ui.setconfig(experimental, 'bundle2.pushback', True) |
|
717 | 717 | |
|
718 | 718 | if scratchpush: |
|
719 | 719 | # this is an infinitepush; we don't want the bookmark to be applied,

720 | 720 | # rather it should be stored in the bundlestore
|
721 | 721 | opts[r'bookmark'] = [] |
|
722 | 722 | ui.setconfig(experimental, configscratchpush, True) |
|
723 | 723 | oldphasemove = extensions.wrapfunction(exchange, |
|
724 | 724 | '_localphasemove', |
|
725 | 725 | _phasemove) |
|
726 | 726 | # Copy-paste from `push` command |
|
727 | 727 | path = ui.paths.getpath(dest, default=('default-push', 'default')) |
|
728 | 728 | if not path: |
|
729 | 729 | raise error.Abort(_('default repository not configured!'), |
|
730 | 730 | hint=_("see 'hg help config.paths'")) |
|
731 | 731 | destpath = path.pushloc or path.loc |
|
732 | 732 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
733 | 733 | # know about them. Let's save them before the push and restore them after
|
734 | 734 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath) |
|
735 | 735 | result = orig(ui, repo, dest, *args, **opts) |
|
736 | 736 | if common.isremotebooksenabled(ui): |
|
737 | 737 | if bookmark and scratchpush: |
|
738 | 738 | other = hg.peer(repo, opts, destpath) |
|
739 | 739 | fetchedbookmarks = other.listkeyspatterns('bookmarks', |
|
740 | 740 | patterns=[bookmark]) |
|
741 | 741 | remotescratchbookmarks.update(fetchedbookmarks) |
|
742 | 742 | _saveremotebookmarks(repo, remotescratchbookmarks, destpath) |
|
743 | 743 | if oldphasemove: |
|
744 | 744 | exchange._localphasemove = oldphasemove |
|
745 | 745 | return result |
|
746 | 746 | |
|
747 | 747 | def _deleteinfinitepushbookmarks(ui, repo, path, names): |
|
748 | 748 | """Prune remote names by removing the bookmarks we don't want anymore, |
|
749 | 749 | then writing the result back to disk |
|
750 | 750 | """ |
|
751 | 751 | remotenamesext = extensions.find('remotenames') |
|
752 | 752 | |
|
753 | 753 | # remotename format is: |
|
754 | 754 | # (node, nametype ("branches" or "bookmarks"), remote, name) |
|
755 | 755 | nametype_idx = 1 |
|
756 | 756 | remote_idx = 2 |
|
757 | 757 | name_idx = 3 |
|
758 | 758 | remotenames = [remotename for remotename in \ |
|
759 | 759 | remotenamesext.readremotenames(repo) \ |
|
760 | 760 | if remotename[remote_idx] == path] |
|
761 | 761 | remote_bm_names = [remotename[name_idx] for remotename in \ |
|
762 | 762 | remotenames if remotename[nametype_idx] == "bookmarks"] |
|
763 | 763 | |
|
764 | 764 | for name in names: |
|
765 | 765 | if name not in remote_bm_names: |
|
766 | 766 | raise error.Abort(_("infinitepush bookmark '{}' does not exist " |
|
767 | 767 | "in path '{}'").format(name, path)) |
|
768 | 768 | |
|
769 | 769 | bookmarks = {} |
|
770 | 770 | branches = collections.defaultdict(list) |
|
771 | 771 | for node, nametype, remote, name in remotenames: |
|
772 | 772 | if nametype == "bookmarks" and name not in names: |
|
773 | 773 | bookmarks[name] = node |
|
774 | 774 | elif nametype == "branches": |
|
775 | 775 | # saveremotenames wants binary nodes for branches |
|
776 | 776 | branches[name].append(bin(node)) |
|
777 | 777 | |
|
778 | 778 | remotenamesext.saveremotenames(repo, path, branches, bookmarks) |
|
779 | 779 | |
|
780 | 780 | def _phasemove(orig, pushop, nodes, phase=phases.public): |
|
781 | 781 | """prevent commits from being marked public |
|
782 | 782 | |
|
783 | 783 | Since these are going to a scratch branch, they aren't really being |
|
784 | 784 | published.""" |
|
785 | 785 | |
|
786 | 786 | if phase != phases.public: |
|
787 | 787 | orig(pushop, nodes, phase) |
|
788 | 788 | |
|
789 | 789 | @exchange.b2partsgenerator(scratchbranchparttype) |
|
790 | 790 | def partgen(pushop, bundler): |
|
791 | 791 | bookmark = pushop.ui.config(experimental, configbookmark) |
|
792 | 792 | scratchpush = pushop.ui.configbool(experimental, configscratchpush) |
|
793 | 793 | if 'changesets' in pushop.stepsdone or not scratchpush: |
|
794 | 794 | return |
|
795 | 795 | |
|
796 | 796 | if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote): |
|
797 | 797 | return |
|
798 | 798 | |
|
799 | 799 | pushop.stepsdone.add('changesets') |
|
800 | 800 | if not pushop.outgoing.missing: |
|
801 | 801 | pushop.ui.status(_('no changes found\n')) |
|
802 | 802 | pushop.cgresult = 0 |
|
803 | 803 | return |
|
804 | 804 | |
|
805 | 805 | # This parameter tells the server that the following bundle is an |
|
806 | 806 | # infinitepush. This lets it switch the part processing to our infinitepush
|
807 | 807 | # code path. |
|
808 | 808 | bundler.addparam("infinitepush", "True") |
|
809 | 809 | |
|
810 | 810 | scratchparts = bundleparts.getscratchbranchparts(pushop.repo, |
|
811 | 811 | pushop.remote, |
|
812 | 812 | pushop.outgoing, |
|
813 | 813 | pushop.ui, |
|
814 | 814 | bookmark) |
|
815 | 815 | |
|
816 | 816 | for scratchpart in scratchparts: |
|
817 | 817 | bundler.addpart(scratchpart) |
|
818 | 818 | |
|
819 | 819 | def handlereply(op): |
|
820 | 820 | # server either succeeds or aborts; no code to read |
|
821 | 821 | pushop.cgresult = 1 |
|
822 | 822 | |
|
823 | 823 | return handlereply |
|
824 | 824 | |
|
825 | 825 | bundle2.capabilities[bundleparts.scratchbranchparttype] = () |
|
826 | 826 | |
|
827 | 827 | def _getrevs(bundle, oldnode, force, bookmark): |
|
828 | 828 | 'extracts and validates the revs to be imported' |
|
829 | 829 | revs = [bundle[r] for r in bundle.revs('sort(bundle())')] |
|
830 | 830 | |
|
831 | 831 | # new bookmark |
|
832 | 832 | if oldnode is None: |
|
833 | 833 | return revs |
|
834 | 834 | |
|
835 | 835 | # Fast forward update |
|
836 | 836 | if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)): |
|
837 | 837 | return revs |
|
838 | 838 | |
|
839 | 839 | return revs |
|
840 | 840 | |
|
841 | 841 | @contextlib.contextmanager |
|
842 | 842 | def logservicecall(logger, service, **kwargs): |
|
843 | 843 | start = time.time() |
|
844 | 844 | logger(service, eventtype='start', **kwargs) |
|
845 | 845 | try: |
|
846 | 846 | yield |
|
847 | 847 | logger(service, eventtype='success', |
|
848 | 848 | elapsedms=(time.time() - start) * 1000, **kwargs) |
|
849 | 849 | except Exception as e: |
|
850 | 850 | logger(service, eventtype='failure', |
|
851 | 851 | elapsedms=(time.time() - start) * 1000, errormsg=str(e), |
|
852 | 852 | **kwargs) |
|
853 | 853 | raise |
|
854 | 854 | |
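Using `logservicecall` is just a matter of wrapping the timed work in a `with` block; a sketch with a print-based logger standing in for the real `functools.partial(ui.log, ...)` logger built below:

    def printlogger(service, **kwargs):
        # Stand-in for the real ui.log-based logger.
        print(service, kwargs)

    with logservicecall(printlogger, 'bundlestore', bundlesize=42):
        pass  # the timed work (e.g. store.write) goes here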
|
855 | 855 | def _getorcreateinfinitepushlogger(op): |
|
856 | 856 | logger = op.records['infinitepushlogger'] |
|
857 | 857 | if not logger: |
|
858 | 858 | ui = op.repo.ui |
|
859 | 859 | try: |
|
860 | 860 | username = procutil.getuser() |
|
861 | 861 | except Exception: |
|
862 | 862 | username = 'unknown' |
|
863 | 863 | # Generate random request id to be able to find all logged entries |
|
864 | 864 | # for the same request. Since requestid is pseudo-generated it may |
|
865 | 865 | # not be unique, but we assume that (hostname, username, requestid) |
|
866 | 866 | # is unique. |
|
867 | 867 | random.seed() |
|
868 | 868 | requestid = random.randint(0, 2000000000) |
|
869 | 869 | hostname = socket.gethostname() |
|
870 | 870 | logger = functools.partial(ui.log, 'infinitepush', user=username, |
|
871 | 871 | requestid=requestid, hostname=hostname, |
|
872 | 872 | reponame=ui.config('infinitepush', |
|
873 | 873 | 'reponame')) |
|
874 | 874 | op.records.add('infinitepushlogger', logger) |
|
875 | 875 | else: |
|
876 | 876 | logger = logger[0] |
|
877 | 877 | return logger |
|
878 | 878 | |
|
879 | 879 | def storetobundlestore(orig, repo, op, unbundler): |
|
880 | 880 | """stores the incoming bundle coming from push command to the bundlestore |
|
881 | 881 | instead of applying on the revlogs""" |
|
882 | 882 | |
|
883 | 883 | repo.ui.status(_("storing changesets on the bundlestore\n")) |
|
884 | 884 | bundler = bundle2.bundle20(repo.ui) |
|
885 | 885 | |
|
886 | 886 | # processing each part and storing it in bundler |
|
887 | 887 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
888 | 888 | for part in parts: |
|
889 | 889 | bundlepart = None |
|
890 | 890 | if part.type == 'replycaps': |
|
891 | 891 | # This configures the current operation to allow reply parts. |
|
892 | 892 | bundle2._processpart(op, part) |
|
893 | 893 | else: |
|
894 | 894 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
895 | 895 | for key, value in part.params.iteritems(): |
|
896 | 896 | bundlepart.addparam(key, value) |
|
897 | 897 | |
|
898 | 898 | # Certain parts require a response |
|
899 | 899 | if part.type in ('pushkey', 'changegroup'): |
|
900 | 900 | if op.reply is not None: |
|
901 | 901 | rpart = op.reply.newpart('reply:%s' % part.type) |
|
902 | 902 | rpart.addparam('in-reply-to', str(part.id), |
|
903 | 903 | mandatory=False) |
|
904 | 904 | rpart.addparam('return', '1', mandatory=False) |
|
905 | 905 | |
|
906 | 906 | op.records.add(part.type, { |
|
907 | 907 | 'return': 1, |
|
908 | 908 | }) |
|
909 | 909 | if bundlepart: |
|
910 | 910 | bundler.addpart(bundlepart) |
|
911 | 911 | |
|
912 | 912 | # storing the bundle in the bundlestore |
|
913 | 913 | buf = util.chunkbuffer(bundler.getchunks()) |
|
914 | 914 | fd, bundlefile = tempfile.mkstemp() |
|
915 | 915 | try: |
|
916 | 916 | try: |
|
917 | 917 | fp = os.fdopen(fd, r'wb') |
|
918 | 918 | fp.write(buf.read()) |
|
919 | 919 | finally: |
|
920 | 920 | fp.close() |
|
921 | 921 | storebundle(op, {}, bundlefile) |
|
922 | 922 | finally: |
|
923 | 923 | try: |
|
924 | 924 | os.unlink(bundlefile) |
|
925 | 925 | except Exception: |
|
926 | 926 | # we would rather see the original exception |
|
927 | 927 | pass |
|
928 | 928 | |
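The mkstemp/fdopen/unlink dance above recurs several times in this file; factored into a hypothetical helper, it would look roughly like this:

    import os
    import tempfile

    def writetempbundle(chunks):
        # Write all chunks to a fresh temporary file and return its path;
        # the caller is responsible for unlinking it when done.
        fd, bundlefile = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'wb') as fp:
                for chunk in chunks:
                    fp.write(chunk)
        except Exception:
            os.unlink(bundlefile)
            raise
        return bundlefile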
|
929 | 929 | def processparts(orig, repo, op, unbundler): |
|
930 | 930 | |
|
931 | 931 | # make sure we don't wrap processparts in case of `hg unbundle` |
|
932 | 932 | if op.source == 'unbundle': |
|
933 | 933 | return orig(repo, op, unbundler) |
|
934 | 934 | |
|
935 | 935 | # this server routes each push to the bundle store
|
936 | 936 | if repo.ui.configbool('infinitepush', 'pushtobundlestore'): |
|
937 | 937 | return storetobundlestore(orig, repo, op, unbundler) |
|
938 | 938 | |
|
939 | 939 | if unbundler.params.get('infinitepush') != 'True': |
|
940 | 940 | return orig(repo, op, unbundler) |
|
941 | 941 | |
|
942 | 942 | handleallparts = repo.ui.configbool('infinitepush', 'storeallparts') |
|
943 | 943 | |
|
944 | 944 | bundler = bundle2.bundle20(repo.ui) |
|
945 | 945 | cgparams = None |
|
946 | 946 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
947 | 947 | for part in parts: |
|
948 | 948 | bundlepart = None |
|
949 | 949 | if part.type == 'replycaps': |
|
950 | 950 | # This configures the current operation to allow reply parts. |
|
951 | 951 | bundle2._processpart(op, part) |
|
952 | 952 | elif part.type == bundleparts.scratchbranchparttype: |
|
953 | 953 | # Scratch branch parts need to be converted to normal |
|
954 | 954 | # changegroup parts, and the extra parameters stored for later |
|
955 | 955 | # when we upload to the store. Eventually those parameters will |
|
956 | 956 | # be put on the actual bundle instead of this part, then we can |
|
957 | 957 | # send a vanilla changegroup instead of the scratchbranch part. |
|
958 | 958 | cgversion = part.params.get('cgversion', '01') |
|
959 | 959 | bundlepart = bundle2.bundlepart('changegroup', data=part.read()) |
|
960 | 960 | bundlepart.addparam('version', cgversion) |
|
961 | 961 | cgparams = part.params |
|
962 | 962 | |
|
963 | 963 | # If we're not dumping all parts into the new bundle, we need to |
|
964 | 964 | # alert the future pushkey and phase-heads handler to skip |
|
965 | 965 | # the part. |
|
966 | 966 | if not handleallparts: |
|
967 | 967 | op.records.add(scratchbranchparttype + '_skippushkey', True) |
|
968 | 968 | op.records.add(scratchbranchparttype + '_skipphaseheads', |
|
969 | 969 | True) |
|
970 | 970 | else: |
|
971 | 971 | if handleallparts: |
|
972 | 972 | # Ideally we would not process any parts, and instead just |
|
973 | 973 | # forward them to the bundle for storage, but since this |
|
974 | 974 | # differs from previous behavior, we need to put it behind a |
|
975 | 975 | # config flag for incremental rollout. |
|
976 | 976 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
977 | 977 | for key, value in part.params.iteritems(): |
|
978 | 978 | bundlepart.addparam(key, value) |
|
979 | 979 | |
|
980 | 980 | # Certain parts require a response |
|
981 | 981 | if part.type == 'pushkey': |
|
982 | 982 | if op.reply is not None: |
|
983 | 983 | rpart = op.reply.newpart('reply:pushkey') |
|
984 | 984 | rpart.addparam('in-reply-to', str(part.id), |
|
985 | 985 | mandatory=False) |
|
986 | 986 | rpart.addparam('return', '1', mandatory=False) |
|
987 | 987 | else: |
|
988 | 988 | bundle2._processpart(op, part) |
|
989 | 989 | |
|
990 | 990 | if handleallparts: |
|
991 | 991 | op.records.add(part.type, { |
|
992 | 992 | 'return': 1, |
|
993 | 993 | }) |
|
994 | 994 | if bundlepart: |
|
995 | 995 | bundler.addpart(bundlepart) |
|
996 | 996 | |
|
997 | 997 | # If commits were sent, store them |
|
998 | 998 | if cgparams: |
|
999 | 999 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1000 | 1000 | fd, bundlefile = tempfile.mkstemp() |
|
1001 | 1001 | try: |
|
1002 | 1002 | try: |
|
1003 | 1003 | fp = os.fdopen(fd, r'wb') |
|
1004 | 1004 | fp.write(buf.read()) |
|
1005 | 1005 | finally: |
|
1006 | 1006 | fp.close() |
|
1007 | 1007 | storebundle(op, cgparams, bundlefile) |
|
1008 | 1008 | finally: |
|
1009 | 1009 | try: |
|
1010 | 1010 | os.unlink(bundlefile) |
|
1011 | 1011 | except Exception: |
|
1012 | 1012 | # we would rather see the original exception |
|
1013 | 1013 | pass |
|
1014 | 1014 | |
|
1015 | 1015 | def storebundle(op, params, bundlefile): |
|
1016 | 1016 | log = _getorcreateinfinitepushlogger(op) |
|
1017 | 1017 | parthandlerstart = time.time() |
|
1018 | 1018 | log(scratchbranchparttype, eventtype='start') |
|
1019 | 1019 | index = op.repo.bundlestore.index |
|
1020 | 1020 | store = op.repo.bundlestore.store |
|
1021 | 1021 | op.records.add(scratchbranchparttype + '_skippushkey', True) |
|
1022 | 1022 | |
|
1023 | 1023 | bundle = None |
|
1024 | 1024 | try: # guards bundle |
|
1025 | 1025 | bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile) |
|
1026 | 1026 | bundle = hg.repository(op.repo.ui, bundlepath) |
|
1027 | 1027 | |
|
1028 | 1028 | bookmark = params.get('bookmark') |
|
1029 | 1029 | bookprevnode = params.get('bookprevnode', '') |
|
1030 | 1030 | force = params.get('force') |
|
1031 | 1031 | |
|
1032 | 1032 | if bookmark: |
|
1033 | 1033 | oldnode = index.getnode(bookmark) |
|
1034 | 1034 | else: |
|
1035 | 1035 | oldnode = None |
|
1036 | 1036 | bundleheads = bundle.revs('heads(bundle())') |
|
1037 | 1037 | if bookmark and len(bundleheads) > 1: |
|
1038 | 1038 | raise error.Abort( |
|
1039 | 1039 | _('cannot push more than one head to a scratch branch')) |
|
1040 | 1040 | |
|
1041 | 1041 | revs = _getrevs(bundle, oldnode, force, bookmark) |
|
1042 | 1042 | |
|
1043 | 1043 | # Notify the user of what is being pushed |
|
1044 | 1044 | plural = 's' if len(revs) > 1 else '' |
|
1045 |      | op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))

     | 1045 | op.repo.ui.warn(_("pushing %d commit%s:\n") % (len(revs), plural))
|
1046 | 1046 | maxoutput = 10 |
|
1047 | 1047 | for i in range(0, min(len(revs), maxoutput)): |
|
1048 | 1048 | firstline = bundle[revs[i]].description().split('\n')[0][:50] |
|
1049 | 1049 | op.repo.ui.warn((" %s %s\n") % (revs[i], firstline)) |
|
1050 | 1050 | |
|
1051 | 1051 | if len(revs) > maxoutput + 1: |
|
1052 | 1052 | op.repo.ui.warn((" ...\n")) |
|
1053 | 1053 | firstline = bundle[revs[-1]].description().split('\n')[0][:50] |
|
1054 | 1054 | op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline)) |
|
1055 | 1055 | |
|
1056 | 1056 | nodesctx = [bundle[rev] for rev in revs] |
|
1057 | 1057 | inindex = lambda rev: bool(index.getbundle(bundle[rev].hex())) |
|
1058 | 1058 | if bundleheads: |
|
1059 | 1059 | newheadscount = sum(not inindex(rev) for rev in bundleheads) |
|
1060 | 1060 | else: |
|
1061 | 1061 | newheadscount = 0 |
|
1062 | 1062 | # If there's a bookmark specified, there should be only one head, |
|
1063 | 1063 | # so we choose the last node, which will be that head. |
|
1064 | 1064 | # If a bug or malicious client allows there to be a bookmark |
|
1065 | 1065 | # with multiple heads, we will place the bookmark on the last head. |
|
1066 | 1066 | bookmarknode = nodesctx[-1].hex() if nodesctx else None |
|
1067 | 1067 | key = None |
|
1068 | 1068 | if newheadscount: |
|
1069 | 1069 | with open(bundlefile, 'r') as f: |
|
1070 | 1070 | bundledata = f.read() |
|
1071 | 1071 | with logservicecall(log, 'bundlestore', |
|
1072 | 1072 | bundlesize=len(bundledata)): |
|
1073 | 1073 | bundlesizelimit = 100 * 1024 * 1024 # 100 MB |
|
1074 | 1074 | if len(bundledata) > bundlesizelimit: |
|
1075 | 1075 | error_msg = ('bundle is too big: %d bytes. ' + |
|
1076 | 1076 | 'max allowed size is 100 MB') |
|
1077 | 1077 | raise error.Abort(error_msg % (len(bundledata),)) |
|
1078 | 1078 | key = store.write(bundledata) |
|
1079 | 1079 | |
|
1080 | 1080 | with logservicecall(log, 'index', newheadscount=newheadscount), index: |
|
1081 | 1081 | if key: |
|
1082 | 1082 | index.addbundle(key, nodesctx) |
|
1083 | 1083 | if bookmark: |
|
1084 | 1084 | index.addbookmark(bookmark, bookmarknode) |
|
1085 | 1085 | _maybeaddpushbackpart(op, bookmark, bookmarknode, |
|
1086 | 1086 | bookprevnode, params) |
|
1087 | 1087 | log(scratchbranchparttype, eventtype='success', |
|
1088 | 1088 | elapsedms=(time.time() - parthandlerstart) * 1000) |
|
1089 | 1089 | |
|
1090 | 1090 | except Exception as e: |
|
1091 | 1091 | log(scratchbranchparttype, eventtype='failure', |
|
1092 | 1092 | elapsedms=(time.time() - parthandlerstart) * 1000, |
|
1093 | 1093 | errormsg=str(e)) |
|
1094 | 1094 | raise |
|
1095 | 1095 | finally: |
|
1096 | 1096 | if bundle: |
|
1097 | 1097 | bundle.close() |
|
1098 | 1098 | |
|
1099 | 1099 | @bundle2.parthandler(scratchbranchparttype, |
|
1100 | 1100 | ('bookmark', 'bookprevnode', 'force', |
|
1101 | 1101 | 'pushbackbookmarks', 'cgversion')) |
|
1102 | 1102 | def bundle2scratchbranch(op, part): |
|
1103 | 1103 | '''unbundle a bundle2 part containing a changegroup to store''' |
|
1104 | 1104 | |
|
1105 | 1105 | bundler = bundle2.bundle20(op.repo.ui) |
|
1106 | 1106 | cgversion = part.params.get('cgversion', '01') |
|
1107 | 1107 | cgpart = bundle2.bundlepart('changegroup', data=part.read()) |
|
1108 | 1108 | cgpart.addparam('version', cgversion) |
|
1109 | 1109 | bundler.addpart(cgpart) |
|
1110 | 1110 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1111 | 1111 | |
|
1112 | 1112 | fd, bundlefile = tempfile.mkstemp() |
|
1113 | 1113 | try: |
|
1114 | 1114 | try: |
|
1115 | 1115 | fp = os.fdopen(fd, r'wb') |
|
1116 | 1116 | fp.write(buf.read()) |
|
1117 | 1117 | finally: |
|
1118 | 1118 | fp.close() |
|
1119 | 1119 | storebundle(op, part.params, bundlefile) |
|
1120 | 1120 | finally: |
|
1121 | 1121 | try: |
|
1122 | 1122 | os.unlink(bundlefile) |
|
1123 | 1123 | except OSError as e: |
|
1124 | 1124 | if e.errno != errno.ENOENT: |
|
1125 | 1125 | raise |
|
1126 | 1126 | |
|
1127 | 1127 | return 1 |
|
1128 | 1128 | |
|
1129 | 1129 | def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params): |
|
1130 | 1130 | if params.get('pushbackbookmarks'): |
|
1131 | 1131 | if op.reply and 'pushback' in op.reply.capabilities: |
|
1132 | 1132 | params = { |
|
1133 | 1133 | 'namespace': 'bookmarks', |
|
1134 | 1134 | 'key': bookmark, |
|
1135 | 1135 | 'new': newnode, |
|
1136 | 1136 | 'old': oldnode, |
|
1137 | 1137 | } |
|
1138 | 1138 | op.reply.newpart('pushkey', mandatoryparams=params.iteritems()) |
|
1139 | 1139 | |
|
1140 | 1140 | def bundle2pushkey(orig, op, part): |
|
1141 | 1141 | '''Wrapper of bundle2.handlepushkey() |
|
1142 | 1142 | |
|
1143 | 1143 | The only goal is to skip calling the original function if the flag is set.

1144 | 1144 | It's set when an infinitepush push is happening.
|
1145 | 1145 | ''' |
|
1146 | 1146 | if op.records[scratchbranchparttype + '_skippushkey']: |
|
1147 | 1147 | if op.reply is not None: |
|
1148 | 1148 | rpart = op.reply.newpart('reply:pushkey') |
|
1149 | 1149 | rpart.addparam('in-reply-to', str(part.id), mandatory=False) |
|
1150 | 1150 | rpart.addparam('return', '1', mandatory=False) |
|
1151 | 1151 | return 1 |
|
1152 | 1152 | |
|
1153 | 1153 | return orig(op, part) |
|
1154 | 1154 | |
|
1155 | 1155 | def bundle2handlephases(orig, op, part): |
|
1156 | 1156 | '''Wrapper of bundle2.handlephases() |
|
1157 | 1157 | |
|
1158 | 1158 | The only goal is to skip calling the original function if the flag is set.

1159 | 1159 | It's set when an infinitepush push is happening.
|
1160 | 1160 | ''' |
|
1161 | 1161 | |
|
1162 | 1162 | if op.records[scratchbranchparttype + '_skipphaseheads']: |
|
1163 | 1163 | return |
|
1164 | 1164 | |
|
1165 | 1165 | return orig(op, part) |
|
1166 | 1166 | |
|
1167 | 1167 | def _asyncsavemetadata(root, nodes): |
|
1168 | 1168 | '''starts a separate process that fills metadata for the nodes |
|
1169 | 1169 | |
|
1170 | 1170 | This function creates a separate process and doesn't wait for its
|
1171 | 1171 | completion. This was done to avoid slowing down pushes |
|
1172 | 1172 | ''' |
|
1173 | 1173 | |
|
1174 | 1174 | maxnodes = 50 |
|
1175 | 1175 | if len(nodes) > maxnodes: |
|
1176 | 1176 | return |
|
1177 | 1177 | nodesargs = [] |
|
1178 | 1178 | for node in nodes: |
|
1179 | 1179 | nodesargs.append('--node') |
|
1180 | 1180 | nodesargs.append(node) |
|
1181 | 1181 | with open(os.devnull, 'w+b') as devnull: |
|
1182 | 1182 | cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata', |
|
1183 | 1183 | '-R', root] + nodesargs |
|
1184 | 1184 | # Process will run in background. We don't care about the return code |
|
1185 | 1185 | subprocess.Popen(cmdline, close_fds=True, shell=False, |
|
1186 | 1186 | stdin=devnull, stdout=devnull, stderr=devnull) |
@@ -1,1141 +1,1141 b'' | |||
|
1 | 1 | # hg.py - repository classes for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | 12 | import hashlib |
|
13 | 13 | import os |
|
14 | 14 | import shutil |
|
15 | 15 | import stat |
|
16 | 16 | |
|
17 | 17 | from .i18n import _ |
|
18 | 18 | from .node import ( |
|
19 | 19 | nullid, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | from . import ( |
|
23 | 23 | bookmarks, |
|
24 | 24 | bundlerepo, |
|
25 | 25 | cacheutil, |
|
26 | 26 | cmdutil, |
|
27 | 27 | destutil, |
|
28 | 28 | discovery, |
|
29 | 29 | error, |
|
30 | 30 | exchange, |
|
31 | 31 | extensions, |
|
32 | 32 | httppeer, |
|
33 | 33 | localrepo, |
|
34 | 34 | lock, |
|
35 | 35 | logcmdutil, |
|
36 | 36 | logexchange, |
|
37 | 37 | merge as mergemod, |
|
38 | 38 | node, |
|
39 | 39 | phases, |
|
40 | 40 | scmutil, |
|
41 | 41 | sshpeer, |
|
42 | 42 | statichttprepo, |
|
43 | 43 | ui as uimod, |
|
44 | 44 | unionrepo, |
|
45 | 45 | url, |
|
46 | 46 | util, |
|
47 | 47 | verify as verifymod, |
|
48 | 48 | vfs as vfsmod, |
|
49 | 49 | ) |
|
50 | 50 | |
|
51 | 51 | from .utils import ( |
|
52 | 52 | stringutil, |
|
53 | 53 | ) |
|
54 | 54 | |
|
55 | 55 | release = lock.release |
|
56 | 56 | |
|
57 | 57 | # shared features |
|
58 | 58 | sharedbookmarks = 'bookmarks' |
|
59 | 59 | |
|
60 | 60 | def _local(path): |
|
61 | 61 | path = util.expandpath(util.urllocalpath(path)) |
|
62 | 62 | return (os.path.isfile(path) and bundlerepo or localrepo) |
|
63 | 63 | |
|
64 | 64 | def addbranchrevs(lrepo, other, branches, revs): |
|
65 | 65 | peer = other.peer() # a courtesy to callers using a localrepo for other |
|
66 | 66 | hashbranch, branches = branches |
|
67 | 67 | if not hashbranch and not branches: |
|
68 | 68 | x = revs or None |
|
69 | 69 | if revs: |
|
70 | 70 | y = revs[0] |
|
71 | 71 | else: |
|
72 | 72 | y = None |
|
73 | 73 | return x, y |
|
74 | 74 | if revs: |
|
75 | 75 | revs = list(revs) |
|
76 | 76 | else: |
|
77 | 77 | revs = [] |
|
78 | 78 | |
|
79 | 79 | if not peer.capable('branchmap'): |
|
80 | 80 | if branches: |
|
81 | 81 | raise error.Abort(_("remote branch lookup not supported")) |
|
82 | 82 | revs.append(hashbranch) |
|
83 | 83 | return revs, revs[0] |
|
84 | 84 | branchmap = peer.branchmap() |
|
85 | 85 | |
|
86 | 86 | def primary(branch): |
|
87 | 87 | if branch == '.': |
|
88 | 88 | if not lrepo: |
|
89 | 89 | raise error.Abort(_("dirstate branch not accessible")) |
|
90 | 90 | branch = lrepo.dirstate.branch() |
|
91 | 91 | if branch in branchmap: |
|
92 | 92 | revs.extend(node.hex(r) for r in reversed(branchmap[branch])) |
|
93 | 93 | return True |
|
94 | 94 | else: |
|
95 | 95 | return False |
|
96 | 96 | |
|
97 | 97 | for branch in branches: |
|
98 | 98 | if not primary(branch): |
|
99 | 99 | raise error.RepoLookupError(_("unknown branch '%s'") % branch) |
|
100 | 100 | if hashbranch: |
|
101 | 101 | if not primary(hashbranch): |
|
102 | 102 | revs.append(hashbranch) |
|
103 | 103 | return revs, revs[0] |
|
104 | 104 | |
|
105 | 105 | def parseurl(path, branches=None): |
|
106 | 106 | '''parse url#branch, returning (url, (branch, branches))''' |
|
107 | 107 | |
|
108 | 108 | u = util.url(path) |
|
109 | 109 | branch = None |
|
110 | 110 | if u.fragment: |
|
111 | 111 | branch = u.fragment |
|
112 | 112 | u.fragment = None |
|
113 | 113 | return bytes(u), (branch, branches or []) |
|
114 | 114 | |
|
115 | 115 | schemes = { |
|
116 | 116 | 'bundle': bundlerepo, |
|
117 | 117 | 'union': unionrepo, |
|
118 | 118 | 'file': _local, |
|
119 | 119 | 'http': httppeer, |
|
120 | 120 | 'https': httppeer, |
|
121 | 121 | 'ssh': sshpeer, |
|
122 | 122 | 'static-http': statichttprepo, |
|
123 | 123 | } |
|
124 | 124 | |
|
125 | 125 | def _peerlookup(path): |
|
126 | 126 | u = util.url(path) |
|
127 | 127 | scheme = u.scheme or 'file' |
|
128 | 128 | thing = schemes.get(scheme) or schemes['file'] |
|
129 | 129 | try: |
|
130 | 130 | return thing(path) |
|
131 | 131 | except TypeError: |
|
132 | 132 | # we can't test callable(thing) because 'thing' can be an unloaded |
|
133 | 133 | # module that implements __call__ |
|
134 | 134 | if not util.safehasattr(thing, 'instance'): |
|
135 | 135 | raise |
|
136 | 136 | return thing |
|
137 | 137 | |
|
138 | 138 | def islocal(repo): |
|
139 | 139 | '''return true if repo (or path pointing to repo) is local''' |
|
140 | 140 | if isinstance(repo, bytes): |
|
141 | 141 | try: |
|
142 | 142 | return _peerlookup(repo).islocal(repo) |
|
143 | 143 | except AttributeError: |
|
144 | 144 | return False |
|
145 | 145 | return repo.local() |
|
146 | 146 | |
|
147 | 147 | def openpath(ui, path): |
|
148 | 148 | '''open path with open if local, url.open if remote''' |
|
149 | 149 | pathurl = util.url(path, parsequery=False, parsefragment=False) |
|
150 | 150 | if pathurl.islocal(): |
|
151 | 151 | return util.posixfile(pathurl.localpath(), 'rb') |
|
152 | 152 | else: |
|
153 | 153 | return url.open(ui, path) |
|
154 | 154 | |
|
155 | 155 | # a list of (ui, repo) functions called for wire peer initialization |
|
156 | 156 | wirepeersetupfuncs = [] |
|
157 | 157 | |
|
158 | 158 | def _peerorrepo(ui, path, create=False, presetupfuncs=None): |
|
159 | 159 | """return a repository object for the specified path""" |
|
160 | 160 | obj = _peerlookup(path).instance(ui, path, create) |
|
161 | 161 | ui = getattr(obj, "ui", ui) |
|
162 | 162 | for f in presetupfuncs or []: |
|
163 | 163 | f(ui, obj) |
|
164 | 164 | for name, module in extensions.extensions(ui): |
|
165 | 165 | hook = getattr(module, 'reposetup', None) |
|
166 | 166 | if hook: |
|
167 | 167 | hook(ui, obj) |
|
168 | 168 | if not obj.local(): |
|
169 | 169 | for f in wirepeersetupfuncs: |
|
170 | 170 | f(ui, obj) |
|
171 | 171 | return obj |
|
172 | 172 | |
|
173 | 173 | def repository(ui, path='', create=False, presetupfuncs=None): |
|
174 | 174 | """return a repository object for the specified path""" |
|
175 | 175 | peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs) |
|
176 | 176 | repo = peer.local() |
|
177 | 177 | if not repo: |
|
178 | 178 | raise error.Abort(_("repository '%s' is not local") % |
|
179 | 179 | (path or peer.url())) |
|
180 | 180 | return repo.filtered('visible') |
|
181 | 181 | |
|
182 | 182 | def peer(uiorrepo, opts, path, create=False): |
|
183 | 183 | '''return a repository peer for the specified path''' |
|
184 | 184 | rui = remoteui(uiorrepo, opts) |
|
185 | 185 | return _peerorrepo(rui, path, create).peer() |
|
186 | 186 | |
|
187 | 187 | def defaultdest(source): |
|
188 | 188 | '''return default destination of clone if none is given |
|
189 | 189 | |
|
190 | 190 | >>> defaultdest(b'foo') |
|
191 | 191 | 'foo' |
|
192 | 192 | >>> defaultdest(b'/foo/bar') |
|
193 | 193 | 'bar' |
|
194 | 194 | >>> defaultdest(b'/') |
|
195 | 195 | '' |
|
196 | 196 | >>> defaultdest(b'') |
|
197 | 197 | '' |
|
198 | 198 | >>> defaultdest(b'http://example.org/') |
|
199 | 199 | '' |
|
200 | 200 | >>> defaultdest(b'http://example.org/foo/') |
|
201 | 201 | 'foo' |
|
202 | 202 | ''' |
|
203 | 203 | path = util.url(source).path |
|
204 | 204 | if not path: |
|
205 | 205 | return '' |
|
206 | 206 | return os.path.basename(os.path.normpath(path)) |
|
207 | 207 | |
|
208 | 208 | def sharedreposource(repo): |
|
209 | 209 | """Returns repository object for source repository of a shared repo. |
|
210 | 210 | |
|
211 | 211 | If repo is not a shared repository, returns None. |
|
212 | 212 | """ |
|
213 | 213 | if repo.sharedpath == repo.path: |
|
214 | 214 | return None |
|
215 | 215 | |
|
216 | 216 | if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: |
|
217 | 217 | return repo.srcrepo |
|
218 | 218 | |
|
219 | 219 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
220 | 220 | source = repo.vfs.split(repo.sharedpath)[0] |
|
221 | 221 | srcurl, branches = parseurl(source) |
|
222 | 222 | srcrepo = repository(repo.ui, srcurl) |
|
223 | 223 | repo.srcrepo = srcrepo |
|
224 | 224 | return srcrepo |
|
225 | 225 | |
|
226 | 226 | def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, |
|
227 | 227 | relative=False): |
|
228 | 228 | '''create a shared repository''' |
|
229 | 229 | |
|
230 | 230 | if not islocal(source): |
|
231 | 231 | raise error.Abort(_('can only share local repositories')) |
|
232 | 232 | |
|
233 | 233 | if not dest: |
|
234 | 234 | dest = defaultdest(source) |
|
235 | 235 | else: |
|
236 | 236 | dest = ui.expandpath(dest) |
|
237 | 237 | |
|
238 | 238 | if isinstance(source, bytes): |
|
239 | 239 | origsource = ui.expandpath(source) |
|
240 | 240 | source, branches = parseurl(origsource) |
|
241 | 241 | srcrepo = repository(ui, source) |
|
242 | 242 | rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) |
|
243 | 243 | else: |
|
244 | 244 | srcrepo = source.local() |
|
245 | 245 | origsource = source = srcrepo.url() |
|
246 | 246 | checkout = None |
|
247 | 247 | |
|
248 | 248 | sharedpath = srcrepo.sharedpath # if our source is already sharing |
|
249 | 249 | |
|
250 | 250 | destwvfs = vfsmod.vfs(dest, realpath=True) |
|
251 | 251 | destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True) |
|
252 | 252 | |
|
253 | 253 | if destvfs.lexists(): |
|
254 | 254 | raise error.Abort(_('destination already exists')) |
|
255 | 255 | |
|
256 | 256 | if not destwvfs.isdir(): |
|
257 | 257 | destwvfs.mkdir() |
|
258 | 258 | destvfs.makedir() |
|
259 | 259 | |
|
260 | 260 | requirements = '' |
|
261 | 261 | try: |
|
262 | 262 | requirements = srcrepo.vfs.read('requires') |
|
263 | 263 | except IOError as inst: |
|
264 | 264 | if inst.errno != errno.ENOENT: |
|
265 | 265 | raise |
|
266 | 266 | |
|
267 | 267 | if relative: |
|
268 | 268 | try: |
|
269 | 269 | sharedpath = os.path.relpath(sharedpath, destvfs.base) |
|
270 | 270 | requirements += 'relshared\n' |
|
271 | 271 | except (IOError, ValueError) as e: |
|
272 | 272 | # ValueError is raised on Windows if the drive letters differ on |
|
273 | 273 | # each path |
|
274 | 274 | raise error.Abort(_('cannot calculate relative path'), |
|
275 | 275 | hint=stringutil.forcebytestr(e)) |
|
276 | 276 | else: |
|
277 | 277 | requirements += 'shared\n' |
|
278 | 278 | |
|
279 | 279 | destvfs.write('requires', requirements) |
|
280 | 280 | destvfs.write('sharedpath', sharedpath) |
|
281 | 281 | |
|
282 | 282 | r = repository(ui, destwvfs.base) |
|
283 | 283 | postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath) |
|
284 | 284 | _postshareupdate(r, update, checkout=checkout) |
|
285 | 285 | return r |
|
286 | 286 | |
|
287 | 287 | def unshare(ui, repo): |
|
288 | 288 | """convert a shared repository to a normal one |
|
289 | 289 | |
|
290 | 290 | Copy the store data to the repo and remove the sharedpath data. |
|
291 | 291 | """ |
|
292 | 292 | |
|
293 | 293 | destlock = lock = None |
|
294 | 294 | lock = repo.lock() |
|
295 | 295 | try: |
|
296 | 296 | # we use locks here because if we race with commit, we |
|
297 | 297 | # can end up with extra data in the cloned revlogs that's |
|
298 | 298 | # not pointed to by changesets, thus causing verify to |
|
299 | 299 | # fail |
|
300 | 300 | |
|
301 | 301 | destlock = copystore(ui, repo, repo.path) |
|
302 | 302 | |
|
303 | 303 | sharefile = repo.vfs.join('sharedpath') |
|
304 | 304 | util.rename(sharefile, sharefile + '.old') |
|
305 | 305 | |
|
306 | 306 | repo.requirements.discard('shared') |
|
307 | 307 | repo.requirements.discard('relshared') |
|
308 | 308 | repo._writerequirements() |
|
309 | 309 | finally: |
|
310 | 310 | destlock and destlock.release() |
|
311 | 311 | lock and lock.release() |
|
312 | 312 | |
|
313 | 313 | # update store, spath, svfs and sjoin of repo |
|
314 | 314 | repo.unfiltered().__init__(repo.baseui, repo.root) |
|
315 | 315 | |
|
316 | 316 | # TODO: figure out how to access subrepos that exist, but were previously |
|
317 | 317 | # removed from .hgsub |
|
318 | 318 | c = repo['.'] |
|
319 | 319 | subs = c.substate |
|
320 | 320 | for s in sorted(subs): |
|
321 | 321 | c.sub(s).unshare() |
|
322 | 322 | |
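A sketch of driving unshare() from Python; the repository path is hypothetical, and ui.load() is assumed to be the available bootstrap for a configured ui object:

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()                    # assumed ui bootstrap
    repo = hg.repository(u, '/tmp/dest')   # hypothetical shared repo
    hg.unshare(u, repo)                    # copies the store, drops 'shared'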
|
323 | 323 | def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None): |
|
324 | 324 | """Called after a new shared repo is created. |
|
325 | 325 | |
|
326 | 326 | The new repo only has a requirements file and pointer to the source. |
|
327 | 327 | This function configures additional shared data. |
|
328 | 328 | |
|
329 | 329 | Extensions can wrap this function and write additional entries to |
|
330 | 330 | destrepo/.hg/shared to indicate additional pieces of data to be shared. |
|
331 | 331 | """ |
|
332 | 332 | default = defaultpath or sourcerepo.ui.config('paths', 'default') |
|
333 | 333 | if default: |
|
334 | 334 | template = ('[paths]\n' |
|
335 | 335 | 'default = %s\n') |
|
336 | 336 | destrepo.vfs.write('hgrc', util.tonativeeol(template % default)) |
|
337 | 337 | |
|
338 | 338 | with destrepo.wlock(): |
|
339 | 339 | if bookmarks: |
|
340 | 340 | destrepo.vfs.write('shared', sharedbookmarks + '\n') |
|
341 | 341 | |
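When a default path is available, the hgrc that postshare() writes into the destination looks like the following (the path itself is hypothetical):

    [paths]
    default = /path/to/source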
|
342 | 342 | def _postshareupdate(repo, update, checkout=None): |
|
343 | 343 | """Maybe perform a working directory update after a shared repo is created. |
|
344 | 344 | |
|
345 | 345 | ``update`` can be a boolean or a revision to update to. |
|
346 | 346 | """ |
|
347 | 347 | if not update: |
|
348 | 348 | return |
|
349 | 349 | |
|
350 | 350 | repo.ui.status(_("updating working directory\n")) |
|
351 | 351 | if update is not True: |
|
352 | 352 | checkout = update |
|
353 | 353 | for test in (checkout, 'default', 'tip'): |
|
354 | 354 | if test is None: |
|
355 | 355 | continue |
|
356 | 356 | try: |
|
357 | 357 | uprev = repo.lookup(test) |
|
358 | 358 | break |
|
359 | 359 | except error.RepoLookupError: |
|
360 | 360 | continue |
|
361 | 361 | _update(repo, uprev) |
|
362 | 362 | |
|
363 | 363 | def copystore(ui, srcrepo, destpath): |
|
364 | 364 | '''copy files from the store of srcrepo into destpath
|
365 | 365 | |
|
366 | 366 | returns destlock |
|
367 | 367 | ''' |
|
368 | 368 | destlock = None |
|
369 | 369 | try: |
|
370 | 370 | hardlink = None |
|
371 | 371 | num = 0 |
|
372 | 372 | closetopic = [None] |
|
373 | 373 | def prog(topic, pos): |
|
374 | 374 | if pos is None: |
|
375 | 375 | closetopic[0] = topic |
|
376 | 376 | else: |
|
377 | 377 | ui.progress(topic, pos + num) |
|
378 | 378 | srcpublishing = srcrepo.publishing() |
|
379 | 379 | srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
|
380 | 380 | dstvfs = vfsmod.vfs(destpath) |
|
381 | 381 | for f in srcrepo.store.copylist(): |
|
382 | 382 | if srcpublishing and f.endswith('phaseroots'): |
|
383 | 383 | continue |
|
384 | 384 | dstbase = os.path.dirname(f) |
|
385 | 385 | if dstbase and not dstvfs.exists(dstbase): |
|
386 | 386 | dstvfs.mkdir(dstbase) |
|
387 | 387 | if srcvfs.exists(f): |
|
388 | 388 | if f.endswith('data'): |
|
389 | 389 | # 'dstbase' may be empty (e.g. revlog format 0) |
|
390 | 390 | lockfile = os.path.join(dstbase, "lock") |
|
391 | 391 | # lock to avoid premature writing to the target |
|
392 | 392 | destlock = lock.lock(dstvfs, lockfile) |
|
393 | 393 | hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f), |
|
394 | 394 | hardlink, progress=prog) |
|
395 | 395 | num += n |
|
396 | 396 | if hardlink: |
|
397 | 397 | ui.debug("linked %d files\n" % num) |
|
398 | 398 | if closetopic[0]: |
|
399 | 399 | ui.progress(closetopic[0], None) |
|
400 | 400 | else: |
|
401 | 401 | ui.debug("copied %d files\n" % num) |
|
402 | 402 | if closetopic[0]: |
|
403 | 403 | ui.progress(closetopic[0], None) |
|
404 | 404 | return destlock |
|
405 | 405 | except: # re-raises |
|
406 | 406 | release(destlock) |
|
407 | 407 | raise |
|
408 | 408 | |
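A sketch of calling copystore() directly, under the assumption of two hypothetical local paths; the returned lock guards the destination until the caller finishes populating it:

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()                    # assumed ui bootstrap
    src = hg.repository(u, '/tmp/src')     # hypothetical source repo
    destlock = hg.copystore(u, src, '/tmp/dest/.hg')
    if destlock:
        destlock.release()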
|
409 | 409 | def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False, |
|
410 | 410 | rev=None, update=True, stream=False): |
|
411 | 411 | """Perform a clone using a shared repo. |
|
412 | 412 | |
|
413 | 413 | The store for the repository will be located at <sharepath>/.hg. The |
|
414 | 414 | specified revisions will be cloned or pulled from "source". A shared repo |
|
415 | 415 | will be created at "dest" and a working copy will be created if "update" is |
|
416 | 416 | True. |
|
417 | 417 | """ |
|
418 | 418 | revs = None |
|
419 | 419 | if rev: |
|
420 | 420 | if not srcpeer.capable('lookup'): |
|
421 | 421 | raise error.Abort(_("src repository does not support " |
|
422 | 422 | "revision lookup and so doesn't " |
|
423 | 423 | "support clone by revision")) |
|
424 | 424 | revs = [srcpeer.lookup(r) for r in rev] |
|
425 | 425 | |
|
426 | 426 | # Obtain a lock before checking for or cloning the pooled repo otherwise |
|
427 | 427 | # 2 clients may race creating or populating it. |
|
428 | 428 | pooldir = os.path.dirname(sharepath) |
|
429 | 429 | # lock class requires the directory to exist. |
|
430 | 430 | try: |
|
431 | 431 | util.makedir(pooldir, False) |
|
432 | 432 | except OSError as e: |
|
433 | 433 | if e.errno != errno.EEXIST: |
|
434 | 434 | raise |
|
435 | 435 | |
|
436 | 436 | poolvfs = vfsmod.vfs(pooldir) |
|
437 | 437 | basename = os.path.basename(sharepath) |
|
438 | 438 | |
|
439 | 439 | with lock.lock(poolvfs, '%s.lock' % basename): |
|
440 | 440 | if os.path.exists(sharepath): |
|
441 | 441 | ui.status(_('(sharing from existing pooled repository %s)\n') % |
|
442 | 442 | basename) |
|
443 | 443 | else: |
|
444 | 444 | ui.status(_('(sharing from new pooled repository %s)\n') % basename) |
|
445 | 445 | # Always use pull mode because hardlinks in share mode don't work |
|
446 | 446 | # well. Never update because working copies aren't necessary in |
|
447 | 447 | # share mode. |
|
448 | 448 | clone(ui, peeropts, source, dest=sharepath, pull=True, |
|
449 | 449 | revs=rev, update=False, stream=stream) |
|
450 | 450 | |
|
451 | 451 | # Resolve the value to put in [paths] section for the source. |
|
452 | 452 | if islocal(source): |
|
453 | 453 | defaultpath = os.path.abspath(util.urllocalpath(source)) |
|
454 | 454 | else: |
|
455 | 455 | defaultpath = source |
|
456 | 456 | |
|
457 | 457 | sharerepo = repository(ui, path=sharepath) |
|
458 | 458 | share(ui, sharerepo, dest=dest, update=False, bookmarks=False, |
|
459 | 459 | defaultpath=defaultpath) |
|
460 | 460 | |
|
461 | 461 | # We need to perform a pull against the dest repo to fetch bookmarks |
|
462 | 462 | # and other non-store data that isn't shared by default. In the case of |
|
463 | 463 | # a non-existing shared repo, this means we pull from the remote twice. This
|
464 | 464 | # is a bit weird. But at the time it was implemented, there wasn't an easy |
|
465 | 465 | # way to pull just non-changegroup data. |
|
466 | 466 | destrepo = repository(ui, path=dest) |
|
467 | 467 | exchange.pull(destrepo, srcpeer, heads=revs) |
|
468 | 468 | |
|
469 | 469 | _postshareupdate(destrepo, update) |
|
470 | 470 | |
|
471 | 471 | return srcpeer, peer(ui, peeropts, dest) |
|
472 | 472 | |
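For the 'remote' share-naming mode handled in clone() below, the pool subdirectory is the hex SHA-1 of the source URL; a sketch with a hypothetical URL:

    import hashlib

    source = b'https://example.com/repo'      # hypothetical source URL
    print(hashlib.sha1(source).hexdigest())   # pool subdirectory name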
|
473 | 473 | # Recomputing branch cache might be slow on big repos, |
|
474 | 474 | # so just copy it |
|
475 | 475 | def _copycache(srcrepo, dstcachedir, fname): |
|
476 | 476 | """copy a cache from srcrepo to destcachedir (if it exists)""" |
|
477 | 477 | srcbranchcache = srcrepo.vfs.join('cache/%s' % fname) |
|
478 | 478 | dstbranchcache = os.path.join(dstcachedir, fname) |
|
479 | 479 | if os.path.exists(srcbranchcache): |
|
480 | 480 | if not os.path.exists(dstcachedir): |
|
481 | 481 | os.mkdir(dstcachedir) |
|
482 | 482 | util.copyfile(srcbranchcache, dstbranchcache) |
|
483 | 483 | |
|
484 | 484 | def clone(ui, peeropts, source, dest=None, pull=False, revs=None, |
|
485 | 485 | update=True, stream=False, branch=None, shareopts=None): |
|
486 | 486 | """Make a copy of an existing repository. |
|
487 | 487 | |
|
488 | 488 | Create a copy of an existing repository in a new directory. The |
|
489 | 489 | source and destination are URLs, as passed to the repository |
|
490 | 490 | function. Returns a pair of repository peers, the source and |
|
491 | 491 | newly created destination. |
|
492 | 492 | |
|
493 | 493 | The location of the source is added to the new repository's |
|
494 | 494 | .hg/hgrc file, as the default to be used for future pulls and |
|
495 | 495 | pushes. |
|
496 | 496 | |
|
497 | 497 | If an exception is raised, the partly cloned/updated destination |
|
498 | 498 | repository will be deleted. |
|
499 | 499 | |
|
500 | 500 | Arguments: |
|
501 | 501 | |
|
502 | 502 | source: repository object or URL |
|
503 | 503 | |
|
504 | 504 | dest: URL of destination repository to create (defaults to base |
|
505 | 505 | name of source repository) |
|
506 | 506 | |
|
507 | 507 | pull: always pull from source repository, even in local case or if the |
|
508 | 508 | server prefers streaming |
|
509 | 509 | |
|
510 | 510 | stream: stream raw data uncompressed from repository (fast over |
|
511 | 511 | LAN, slow over WAN) |
|
512 | 512 | |
|
513 | 513 | revs: revision to clone up to (implies pull=True) |
|
514 | 514 | |
|
515 | 515 | update: update working directory after clone completes, if |
|
516 | 516 | destination is local repository (True means update to default rev, |
|
517 | 517 | anything else is treated as a revision) |
|
518 | 518 | |
|
519 | 519 | branch: branches to clone |
|
520 | 520 | |
|
521 | 521 | shareopts: dict of options to control auto sharing behavior. The "pool" key |
|
522 | 522 | activates auto sharing mode and defines the directory for stores. The |
|
523 | 523 | "mode" key determines how to construct the directory name of the shared |
|
524 | 524 | repository. "identity" means the name is derived from the node of the first |
|
525 | 525 | changeset in the repository. "remote" means the name is derived from the |
|
526 | 526 | remote's path/URL. Defaults to "identity." |
|
527 | 527 | """ |
|
528 | 528 | |
|
529 | 529 | if isinstance(source, bytes): |
|
530 | 530 | origsource = ui.expandpath(source) |
|
531 | 531 | source, branches = parseurl(origsource, branch) |
|
532 | 532 | srcpeer = peer(ui, peeropts, source) |
|
533 | 533 | else: |
|
534 | 534 | srcpeer = source.peer() # in case we were called with a localrepo |
|
535 | 535 | branches = (None, branch or []) |
|
536 | 536 | origsource = source = srcpeer.url() |
|
537 | 537 | revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
|
538 | 538 | |
|
539 | 539 | if dest is None: |
|
540 | 540 | dest = defaultdest(source) |
|
541 | 541 | if dest: |
|
542 | 542 | ui.status(_("destination directory: %s\n") % dest) |
|
543 | 543 | else: |
|
544 | 544 | dest = ui.expandpath(dest) |
|
545 | 545 | |
|
546 | 546 | dest = util.urllocalpath(dest) |
|
547 | 547 | source = util.urllocalpath(source) |
|
548 | 548 | |
|
549 | 549 | if not dest: |
|
550 | 550 | raise error.Abort(_("empty destination path is not valid")) |
|
551 | 551 | |
|
552 | 552 | destvfs = vfsmod.vfs(dest, expandpath=True) |
|
553 | 553 | if destvfs.lexists(): |
|
554 | 554 | if not destvfs.isdir(): |
|
555 | 555 | raise error.Abort(_("destination '%s' already exists") % dest) |
|
556 | 556 | elif destvfs.listdir(): |
|
557 | 557 | raise error.Abort(_("destination '%s' is not empty") % dest) |
|
558 | 558 | |
|
559 | 559 | shareopts = shareopts or {} |
|
560 | 560 | sharepool = shareopts.get('pool') |
|
561 | 561 | sharenamemode = shareopts.get('mode') |
|
562 | 562 | if sharepool and islocal(dest): |
|
563 | 563 | sharepath = None |
|
564 | 564 | if sharenamemode == 'identity': |
|
565 | 565 | # Resolve the name from the initial changeset in the remote |
|
566 | 566 | # repository. This returns nullid when the remote is empty. It |
|
567 | 567 | # raises RepoLookupError if revision 0 is filtered or otherwise |
|
568 | 568 | # not available. If we fail to resolve, sharing is not enabled. |
|
569 | 569 | try: |
|
570 | 570 | rootnode = srcpeer.lookup('0') |
|
571 | 571 | if rootnode != node.nullid: |
|
572 | 572 | sharepath = os.path.join(sharepool, node.hex(rootnode)) |
|
573 | 573 | else: |
|
574 | 574 | ui.status(_('(not using pooled storage: ' |
|
575 | 575 | 'remote appears to be empty)\n')) |
|
576 | 576 | except error.RepoLookupError: |
|
577 | 577 | ui.status(_('(not using pooled storage: ' |
|
578 | 578 | 'unable to resolve identity of remote)\n')) |
|
579 | 579 | elif sharenamemode == 'remote': |
|
580 | 580 | sharepath = os.path.join( |
|
581 | 581 | sharepool, node.hex(hashlib.sha1(source).digest())) |
|
582 | 582 | else: |
|
583 | 583 | raise error.Abort(_('unknown share naming mode: %s') % |
|
584 | 584 | sharenamemode) |
|
585 | 585 | |
|
586 | 586 | if sharepath: |
|
587 | 587 | return clonewithshare(ui, peeropts, sharepath, source, srcpeer, |
|
588 | 588 | dest, pull=pull, rev=revs, update=update, |
|
589 | 589 | stream=stream) |
|
590 | 590 | |
|
591 | 591 | srclock = destlock = cleandir = None |
|
592 | 592 | srcrepo = srcpeer.local() |
|
593 | 593 | try: |
|
594 | 594 | abspath = origsource |
|
595 | 595 | if islocal(origsource): |
|
596 | 596 | abspath = os.path.abspath(util.urllocalpath(origsource)) |
|
597 | 597 | |
|
598 | 598 | if islocal(dest): |
|
599 | 599 | cleandir = dest |
|
600 | 600 | |
|
601 | 601 | copy = False |
|
602 | 602 | if (srcrepo and srcrepo.cancopy() and islocal(dest) |
|
603 | 603 | and not phases.hassecret(srcrepo)): |
|
604 | 604 | copy = not pull and not revs |
|
605 | 605 | |
|
606 | 606 | if copy: |
|
607 | 607 | try: |
|
608 | 608 | # we use a lock here because if we race with commit, we |
|
609 | 609 | # can end up with extra data in the cloned revlogs that's |
|
610 | 610 | # not pointed to by changesets, thus causing verify to |
|
611 | 611 | # fail |
|
612 | 612 | srclock = srcrepo.lock(wait=False) |
|
613 | 613 | except error.LockError: |
|
614 | 614 | copy = False |
|
615 | 615 | |
|
616 | 616 | if copy: |
|
617 | 617 | srcrepo.hook('preoutgoing', throw=True, source='clone') |
|
618 | 618 | hgdir = os.path.realpath(os.path.join(dest, ".hg")) |
|
619 | 619 | if not os.path.exists(dest): |
|
620 | 620 | os.mkdir(dest) |
|
621 | 621 | else: |
|
622 | 622 | # only clean up directories we create ourselves |
|
623 | 623 | cleandir = hgdir |
|
624 | 624 | try: |
|
625 | 625 | destpath = hgdir |
|
626 | 626 | util.makedir(destpath, notindexed=True) |
|
627 | 627 | except OSError as inst: |
|
628 | 628 | if inst.errno == errno.EEXIST: |
|
629 | 629 | cleandir = None |
|
630 | 630 | raise error.Abort(_("destination '%s' already exists") |
|
631 | 631 | % dest) |
|
632 | 632 | raise |
|
633 | 633 | |
|
634 | 634 | destlock = copystore(ui, srcrepo, destpath) |
|
635 | 635 | # copy bookmarks over |
|
636 | 636 | srcbookmarks = srcrepo.vfs.join('bookmarks') |
|
637 | 637 | dstbookmarks = os.path.join(destpath, 'bookmarks') |
|
638 | 638 | if os.path.exists(srcbookmarks): |
|
639 | 639 | util.copyfile(srcbookmarks, dstbookmarks) |
|
640 | 640 | |
|
641 | 641 | dstcachedir = os.path.join(destpath, 'cache') |
|
642 | 642 | for cache in cacheutil.cachetocopy(srcrepo): |
|
643 | 643 | _copycache(srcrepo, dstcachedir, cache) |
|
644 | 644 | |
|
645 | 645 | # we need to re-init the repo after manually copying the data |
|
646 | 646 | # into it |
|
647 | 647 | destpeer = peer(srcrepo, peeropts, dest) |
|
648 | 648 | srcrepo.hook('outgoing', source='clone', |
|
649 | 649 | node=node.hex(node.nullid)) |
|
650 | 650 | else: |
|
651 | 651 | try: |
|
652 | 652 | destpeer = peer(srcrepo or ui, peeropts, dest, create=True) |
|
653 | 653 | # only pass ui when no srcrepo |
|
654 | 654 | except OSError as inst: |
|
655 | 655 | if inst.errno == errno.EEXIST: |
|
656 | 656 | cleandir = None |
|
657 | 657 | raise error.Abort(_("destination '%s' already exists") |
|
658 | 658 | % dest) |
|
659 | 659 | raise |
|
660 | 660 | |
|
661 | 661 | if revs: |
|
662 | 662 | if not srcpeer.capable('lookup'): |
|
663 | 663 | raise error.Abort(_("src repository does not support " |
|
664 | 664 | "revision lookup and so doesn't " |
|
665 | 665 | "support clone by revision")) |
|
666 | 666 | revs = [srcpeer.lookup(r) for r in revs] |
|
667 | 667 | checkout = revs[0] |
|
668 | 668 | else: |
|
669 | 669 | revs = None |
|
670 | 670 | local = destpeer.local() |
|
671 | 671 | if local: |
|
672 | 672 | u = util.url(abspath) |
|
673 | 673 | defaulturl = bytes(u) |
|
674 | 674 | local.ui.setconfig('paths', 'default', defaulturl, 'clone') |
|
675 | 675 | if not stream: |
|
676 | 676 | if pull: |
|
677 | 677 | stream = False |
|
678 | 678 | else: |
|
679 | 679 | stream = None |
|
680 | 680 | # internal config: ui.quietbookmarkmove |
|
681 | 681 | overrides = {('ui', 'quietbookmarkmove'): True} |
|
682 | 682 | with local.ui.configoverride(overrides, 'clone'): |
|
683 | 683 | exchange.pull(local, srcpeer, revs, |
|
684 | 684 | streamclonerequested=stream) |
|
685 | 685 | elif srcrepo: |
|
686 | 686 | exchange.push(srcrepo, destpeer, revs=revs, |
|
687 | 687 | bookmarks=srcrepo._bookmarks.keys()) |
|
688 | 688 | else: |
|
689 | 689 | raise error.Abort(_("clone from remote to remote not supported") |
|
690 | 690 | ) |
|
691 | 691 | |
|
692 | 692 | cleandir = None |
|
693 | 693 | |
|
694 | 694 | destrepo = destpeer.local() |
|
695 | 695 | if destrepo: |
|
696 | 696 | template = uimod.samplehgrcs['cloned'] |
|
697 | 697 | u = util.url(abspath) |
|
698 | 698 | u.passwd = None |
|
699 | 699 | defaulturl = bytes(u) |
|
700 | 700 | destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl)) |
|
701 | 701 | destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone') |
|
702 | 702 | |
|
703 | 703 | if ui.configbool('experimental', 'remotenames'): |
|
704 | 704 | logexchange.pullremotenames(destrepo, srcpeer) |
|
705 | 705 | |
|
706 | 706 | if update: |
|
707 | 707 | if update is not True: |
|
708 | 708 | checkout = srcpeer.lookup(update) |
|
709 | 709 | uprev = None |
|
710 | 710 | status = None |
|
711 | 711 | if checkout is not None: |
|
712 | 712 | if checkout in destrepo: |
|
713 | 713 | uprev = checkout |
|
714 | 714 | else: |
|
715 | 715 | if update is not True: |
|
716 | 716 | try: |
|
717 | 717 | uprev = destrepo.lookup(update) |
|
718 | 718 | except error.RepoLookupError: |
|
719 | 719 | pass |
|
720 | 720 | if uprev is None: |
|
721 | 721 | try: |
|
722 | 722 | uprev = destrepo._bookmarks['@'] |
|
723 | 723 | update = '@' |
|
724 | 724 | bn = destrepo[uprev].branch() |
|
725 | 725 | if bn == 'default': |
|
726 | 726 | status = _("updating to bookmark @\n") |
|
727 | 727 | else: |
|
728 | 728 | status = (_("updating to bookmark @ on branch %s\n") |
|
729 | 729 | % bn) |
|
730 | 730 | except KeyError: |
|
731 | 731 | try: |
|
732 | 732 | uprev = destrepo.branchtip('default') |
|
733 | 733 | except error.RepoLookupError: |
|
734 | 734 | uprev = destrepo.lookup('tip') |
|
735 | 735 | if not status: |
|
736 | 736 | bn = destrepo[uprev].branch() |
|
737 | 737 | status = _("updating to branch %s\n") % bn |
|
738 | 738 | destrepo.ui.status(status) |
|
739 | 739 | _update(destrepo, uprev) |
|
740 | 740 | if update in destrepo._bookmarks: |
|
741 | 741 | bookmarks.activate(destrepo, update) |
|
742 | 742 | finally: |
|
743 | 743 | release(srclock, destlock) |
|
744 | 744 | if cleandir is not None: |
|
745 | 745 | shutil.rmtree(cleandir, True) |
|
746 | 746 | if srcpeer is not None: |
|
747 | 747 | srcpeer.close() |
|
748 | 748 | return srcpeer, destpeer |
|
749 | 749 | |
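A sketch of calling clone() with pooled sharing enabled, matching the shareopts contract in the docstring above; the URL, destination, and pool path are all hypothetical:

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()                    # assumed ui bootstrap
    srcpeer, destpeer = hg.clone(
        u, {}, 'https://example.com/repo', dest='/tmp/work',
        shareopts={'pool': '/tmp/hgpool', 'mode': 'identity'})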
|
750 | 750 | def _showstats(repo, stats, quietempty=False): |
|
751 | 751 | if quietempty and stats.isempty(): |
|
752 | 752 | return |
|
753 | 753 | repo.ui.status(_("%d files updated, %d files merged, " |
|
754 | 754 | "%d files removed, %d files unresolved\n") % ( |
|
755 | 755 | stats.updatedcount, stats.mergedcount, |
|
756 | 756 | stats.removedcount, stats.unresolvedcount)) |
|
757 | 757 | |
|
758 | 758 | def updaterepo(repo, node, overwrite, updatecheck=None): |
|
759 | 759 | """Update the working directory to node. |
|
760 | 760 | |
|
761 | 761 | When overwrite is set, changes are clobbered; otherwise they are merged.
|
762 | 762 | |
|
763 | 763 | returns stats (see pydoc mercurial.merge.applyupdates)""" |
|
764 | 764 | return mergemod.update(repo, node, False, overwrite, |
|
765 | 765 | labels=['working copy', 'destination'], |
|
766 | 766 | updatecheck=updatecheck) |
|
767 | 767 | |
|
768 | 768 | def update(repo, node, quietempty=False, updatecheck=None): |
|
769 | 769 | """update the working directory to node""" |
|
770 | 770 | stats = updaterepo(repo, node, False, updatecheck=updatecheck) |
|
771 | 771 | _showstats(repo, stats, quietempty) |
|
772 | 772 | if stats.unresolvedcount: |
|
773 | 773 | repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) |
|
774 | 774 | return stats.unresolvedcount > 0 |
|
775 | 775 | |
|
776 | 776 | # naming conflict in clone() |
|
777 | 777 | _update = update |
|
778 | 778 | |
|
779 | 779 | def clean(repo, node, show_stats=True, quietempty=False): |
|
780 | 780 | """forcibly switch the working directory to node, clobbering changes""" |
|
781 | 781 | stats = updaterepo(repo, node, True) |
|
782 | 782 | repo.vfs.unlinkpath('graftstate', ignoremissing=True) |
|
783 | 783 | if show_stats: |
|
784 | 784 | _showstats(repo, stats, quietempty) |
|
785 | 785 | return stats.unresolvedcount > 0 |
|
786 | 786 | |
|
787 | 787 | # naming conflict in updatetotally() |
|
788 | 788 | _clean = clean |
|
789 | 789 | |
|
790 | 790 | def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): |
|
791 | 791 | """Update the working directory with extra care for non-file components |
|
792 | 792 | |
|
793 | 793 | This takes care of non-file components below: |
|
794 | 794 | |
|
795 | 795 | :bookmark: might be advanced or (in)activated |
|
796 | 796 | |
|
797 | 797 | This takes arguments below: |
|
798 | 798 | |
|
799 | 799 | :checkout: to which revision the working directory is updated |
|
800 | 800 | :brev: a name, which might be a bookmark to be activated after updating |
|
801 | 801 | :clean: whether changes in the working directory can be discarded |
|
802 | 802 | :updatecheck: how to deal with a dirty working directory |
|
803 | 803 | |
|
804 | 804 | Valid values for updatecheck are (None => linear): |
|
805 | 805 | |
|
806 | 806 | * abort: abort if the working directory is dirty |
|
807 | 807 | * none: don't check (merge working directory changes into destination) |
|
808 | 808 | * linear: check that update is linear before merging working directory |
|
809 | 809 | changes into destination |
|
810 | 810 | * noconflict: check that the update does not result in file merges |
|
811 | 811 | |
|
812 | 812 | This returns whether a conflict was detected during the update.
|
813 | 813 | """ |
|
814 | 814 | if updatecheck is None: |
|
815 | 815 | updatecheck = ui.config('commands', 'update.check') |
|
816 | 816 | if updatecheck not in ('abort', 'none', 'linear', 'noconflict'): |
|
817 | 817 | # If not configured, or invalid value configured |
|
818 | 818 | updatecheck = 'linear' |
|
819 | 819 | with repo.wlock(): |
|
820 | 820 | movemarkfrom = None |
|
821 | 821 | warndest = False |
|
822 | 822 | if checkout is None: |
|
823 | 823 | updata = destutil.destupdate(repo, clean=clean) |
|
824 | 824 | checkout, movemarkfrom, brev = updata |
|
825 | 825 | warndest = True |
|
826 | 826 | |
|
827 | 827 | if clean: |
|
828 | 828 | ret = _clean(repo, checkout) |
|
829 | 829 | else: |
|
830 | 830 | if updatecheck == 'abort': |
|
831 | 831 | cmdutil.bailifchanged(repo, merge=False) |
|
832 | 832 | updatecheck = 'none' |
|
833 | 833 | ret = _update(repo, checkout, updatecheck=updatecheck) |
|
834 | 834 | |
|
835 | 835 | if not ret and movemarkfrom: |
|
836 | 836 | if movemarkfrom == repo['.'].node(): |
|
837 | 837 | pass # no-op update |
|
838 | 838 | elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): |
|
839 | 839 | b = ui.label(repo._activebookmark, 'bookmarks.active') |
|
840 | 840 | ui.status(_("updating bookmark %s\n") % b) |
|
841 | 841 | else: |
|
842 | 842 | # this can happen with a non-linear update |
|
843 | 843 | b = ui.label(repo._activebookmark, 'bookmarks') |
|
844 | 844 | ui.status(_("(leaving bookmark %s)\n") % b) |
|
845 | 845 | bookmarks.deactivate(repo) |
|
846 | 846 | elif brev in repo._bookmarks: |
|
847 | 847 | if brev != repo._activebookmark: |
|
848 | 848 | b = ui.label(brev, 'bookmarks.active') |
|
849 | 849 | ui.status(_("(activating bookmark %s)\n") % b) |
|
850 | 850 | bookmarks.activate(repo, brev) |
|
851 | 851 | elif brev: |
|
852 | 852 | if repo._activebookmark: |
|
853 | 853 | b = ui.label(repo._activebookmark, 'bookmarks') |
|
854 | 854 | ui.status(_("(leaving bookmark %s)\n") % b) |
|
855 | 855 | bookmarks.deactivate(repo) |
|
856 | 856 | |
|
857 | 857 | if warndest: |
|
858 | 858 | destutil.statusotherdests(ui, repo) |
|
859 | 859 | |
|
860 | 860 | return ret |
|
861 | 861 | |
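A sketch of a conservative update that refuses to run with local modifications, using the 'abort' updatecheck value described above (the repository path is hypothetical):

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()                    # assumed ui bootstrap
    repo = hg.repository(u, '/tmp/work')   # hypothetical repo
    ret = hg.updatetotally(u, repo, checkout=None, brev=None,
                           clean=False, updatecheck='abort')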
|
862 | 862 | def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None, |
|
863 | 863 | abort=False): |
|
864 | 864 | """Branch merge with node, resolving changes. Return true if any |
|
865 | 865 | unresolved conflicts.""" |
|
866 | 866 | if not abort: |
|
867 | 867 | stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce, |
|
868 | 868 | labels=labels) |
|
869 | 869 | else: |
|
870 | 870 | ms = mergemod.mergestate.read(repo) |
|
871 | 871 | if ms.active(): |
|
872 | 872 | # there were conflicts |
|
873 | 873 | node = ms.localctx.hex() |
|
874 | 874 | else: |
|
875 | 875 | # there were no conflicts, mergestate was not stored
|
876 | 876 | node = repo['.'].hex() |
|
877 | 877 | |
|
878 | 878 | repo.ui.status(_("aborting the merge, updating back to" |
|
879 | 879 | " %s\n") % node[:12]) |
|
880 | 880 | stats = mergemod.update(repo, node, branchmerge=False, force=True, |
|
881 | 881 | labels=labels) |
|
882 | 882 | |
|
883 | 883 | _showstats(repo, stats) |
|
884 | 884 | if stats.unresolvedcount: |
|
885 | 885 | repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " |
|
886 | 886 | "or 'hg merge --abort' to abandon\n")) |
|
887 | 887 | elif remind and not abort: |
|
888 | 888 | repo.ui.status(_("(branch merge, don't forget to commit)\n")) |
|
889 | 889 | return stats.unresolvedcount > 0 |
|
890 | 890 | |
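A sketch pairing the two modes of merge(): a normal branch merge, then the abort path if conflicts were left unresolved; repo and othernode are hypothetical objects:

    if hg.merge(repo, othernode):          # True means unresolved conflicts
        hg.merge(repo, None, abort=True)   # like 'hg merge --abort'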
|
891 | 891 | def _incoming(displaychlist, subreporecurse, ui, repo, source, |
|
892 | 892 | opts, buffered=False): |
|
893 | 893 | """ |
|
894 | 894 | Helper for incoming / gincoming. |
|
895 | 895 | displaychlist gets called with |
|
896 | 896 | (remoterepo, incomingchangesetlist, displayer) parameters, |
|
897 | 897 | and is supposed to contain only code that can't be unified. |
|
898 | 898 | """ |
|
899 | 899 | source, branches = parseurl(ui.expandpath(source), opts.get('branch')) |
|
900 | 900 | other = peer(repo, opts, source) |
|
901 | 901 | ui.status(_('comparing with %s\n') % util.hidepassword(source)) |
|
902 | 902 | revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) |
|
903 | 903 | |
|
904 | 904 | if revs: |
|
905 | 905 | revs = [other.lookup(rev) for rev in revs] |
|
906 | 906 | other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other, |
|
907 | 907 | revs, opts["bundle"], opts["force"]) |
|
908 | 908 | try: |
|
909 | 909 | if not chlist: |
|
910 | 910 | ui.status(_("no changes found\n")) |
|
911 | 911 | return subreporecurse() |
|
912 | 912 | ui.pager('incoming') |
|
913 | 913 | displayer = logcmdutil.changesetdisplayer(ui, other, opts, |
|
914 | 914 | buffered=buffered) |
|
915 | 915 | displaychlist(other, chlist, displayer) |
|
916 | 916 | displayer.close() |
|
917 | 917 | finally: |
|
918 | 918 | cleanupfn() |
|
919 | 919 | subreporecurse() |
|
920 | 920 | return 0 # exit code is zero since we found incoming changes |
|
921 | 921 | |
|
922 | 922 | def incoming(ui, repo, source, opts): |
|
923 | 923 | def subreporecurse(): |
|
924 | 924 | ret = 1 |
|
925 | 925 | if opts.get('subrepos'): |
|
926 | 926 | ctx = repo[None] |
|
927 | 927 | for subpath in sorted(ctx.substate): |
|
928 | 928 | sub = ctx.sub(subpath) |
|
929 | 929 | ret = min(ret, sub.incoming(ui, source, opts)) |
|
930 | 930 | return ret |
|
931 | 931 | |
|
932 | 932 | def display(other, chlist, displayer): |
|
933 | 933 | limit = logcmdutil.getlimit(opts) |
|
934 | 934 | if opts.get('newest_first'): |
|
935 | 935 | chlist.reverse() |
|
936 | 936 | count = 0 |
|
937 | 937 | for n in chlist: |
|
938 | 938 | if limit is not None and count >= limit: |
|
939 | 939 | break |
|
940 | 940 | parents = [p for p in other.changelog.parents(n) if p != nullid] |
|
941 | 941 | if opts.get('no_merges') and len(parents) == 2: |
|
942 | 942 | continue |
|
943 | 943 | count += 1 |
|
944 | 944 | displayer.show(other[n]) |
|
945 | 945 | return _incoming(display, subreporecurse, ui, repo, source, opts) |
|
946 | 946 | |
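incoming() reads a handful of opts keys via dict lookups; a sketch of a minimal opts dict covering the keys used above (u and repo are hypothetical, and 'default' must name a configured path):

    opts = {'branch': None, 'rev': [], 'bundle': '', 'force': False,
            'subrepos': False, 'newest_first': False, 'no_merges': False}
    ret = hg.incoming(u, repo, 'default', opts)   # 0 means changes found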
|
947 | 947 | def _outgoing(ui, repo, dest, opts): |
|
948 | 948 | path = ui.paths.getpath(dest, default=('default-push', 'default')) |
|
949 | 949 | if not path: |
|
950 | 950 | raise error.Abort(_('default repository not configured!'), |
|
951 | 951 | hint=_("see 'hg help config.paths'")) |
|
952 | 952 | dest = path.pushloc or path.loc |
|
953 | 953 | branches = path.branch, opts.get('branch') or [] |
|
954 | 954 | |
|
955 | 955 | ui.status(_('comparing with %s\n') % util.hidepassword(dest)) |
|
956 | 956 | revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) |
|
957 | 957 | if revs: |
|
958 | 958 | revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
|
959 | 959 | |
|
960 | 960 | other = peer(repo, opts, dest) |
|
961 | 961 | outgoing = discovery.findcommonoutgoing(repo, other, revs, |
|
962 | 962 | force=opts.get('force')) |
|
963 | 963 | o = outgoing.missing |
|
964 | 964 | if not o: |
|
965 | 965 | scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) |
|
966 | 966 | return o, other |
|
967 | 967 | |
|
968 | 968 | def outgoing(ui, repo, dest, opts): |
|
969 | 969 | def recurse(): |
|
970 | 970 | ret = 1 |
|
971 | 971 | if opts.get('subrepos'): |
|
972 | 972 | ctx = repo[None] |
|
973 | 973 | for subpath in sorted(ctx.substate): |
|
974 | 974 | sub = ctx.sub(subpath) |
|
975 | 975 | ret = min(ret, sub.outgoing(ui, dest, opts)) |
|
976 | 976 | return ret |
|
977 | 977 | |
|
978 | 978 | limit = logcmdutil.getlimit(opts) |
|
979 | 979 | o, other = _outgoing(ui, repo, dest, opts) |
|
980 | 980 | if not o: |
|
981 | 981 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
982 | 982 | return recurse() |
|
983 | 983 | |
|
984 | 984 | if opts.get('newest_first'): |
|
985 | 985 | o.reverse() |
|
986 | 986 | ui.pager('outgoing') |
|
987 | 987 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
988 | 988 | count = 0 |
|
989 | 989 | for n in o: |
|
990 | 990 | if limit is not None and count >= limit: |
|
991 | 991 | break |
|
992 | 992 | parents = [p for p in repo.changelog.parents(n) if p != nullid] |
|
993 | 993 | if opts.get('no_merges') and len(parents) == 2: |
|
994 | 994 | continue |
|
995 | 995 | count += 1 |
|
996 | 996 | displayer.show(repo[n]) |
|
997 | 997 | displayer.close() |
|
998 | 998 | cmdutil.outgoinghooks(ui, repo, other, opts, o) |
|
999 | 999 | recurse() |
|
1000 | 1000 | return 0 # exit code is zero since we found outgoing changes |
|
1001 | 1001 | |
|
1002 | 1002 | def verify(repo): |
|
1003 | 1003 | """verify the consistency of a repository""" |
|
1004 | 1004 | ret = verifymod.verify(repo) |
|
1005 | 1005 | |
|
1006 | 1006 | # Broken subrepo references in hidden csets don't seem worth worrying about, |
|
1007 | 1007 | # since they can't be pushed/pulled, and --hidden can be used if they are a |
|
1008 | 1008 | # concern. |
|
1009 | 1009 | |
|
1010 | 1010 | # pathto() is needed for -R case |
|
1011 | 1011 | revs = repo.revs("filelog(%s)", |
|
1012 | 1012 | util.pathto(repo.root, repo.getcwd(), '.hgsubstate')) |
|
1013 | 1013 | |
|
1014 | 1014 | if revs: |
|
1015 | 1015 | repo.ui.status(_('checking subrepo links\n')) |
|
1016 | 1016 | for rev in revs: |
|
1017 | 1017 | ctx = repo[rev] |
|
1018 | 1018 | try: |
|
1019 | 1019 | for subpath in ctx.substate: |
|
1020 | 1020 | try: |
|
1021 | 1021 | ret = (ctx.sub(subpath, allowcreate=False).verify() |
|
1022 | 1022 | or ret) |
|
1023 | 1023 | except error.RepoError as e: |
|
1024 | 1024 | repo.ui.warn(('%d: %s\n') % (rev, e))
|
1025 | 1025 | except Exception: |
|
1026 | 1026 | repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') % |
|
1027 | 1027 | node.short(ctx.node())) |
|
1028 | 1028 | |
|
1029 | 1029 | return ret |
|
1030 | 1030 | |
|
1031 | 1031 | def remoteui(src, opts): |
|
1032 | 1032 | 'build a remote ui from ui or repo and opts' |
|
1033 | 1033 | if util.safehasattr(src, 'baseui'): # looks like a repository |
|
1034 | 1034 | dst = src.baseui.copy() # drop repo-specific config |
|
1035 | 1035 | src = src.ui # copy target options from repo |
|
1036 | 1036 | else: # assume it's a global ui object |
|
1037 | 1037 | dst = src.copy() # keep all global options |
|
1038 | 1038 | |
|
1039 | 1039 | # copy ssh-specific options |
|
1040 | 1040 | for o in 'ssh', 'remotecmd': |
|
1041 | 1041 | v = opts.get(o) or src.config('ui', o) |
|
1042 | 1042 | if v: |
|
1043 | 1043 | dst.setconfig("ui", o, v, 'copied') |
|
1044 | 1044 | |
|
1045 | 1045 | # copy bundle-specific options |
|
1046 | 1046 | r = src.config('bundle', 'mainreporoot') |
|
1047 | 1047 | if r: |
|
1048 | 1048 | dst.setconfig('bundle', 'mainreporoot', r, 'copied') |
|
1049 | 1049 | |
|
1050 | 1050 | # copy selected local settings to the remote ui |
|
1051 | 1051 | for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'): |
|
1052 | 1052 | for key, val in src.configitems(sect): |
|
1053 | 1053 | dst.setconfig(sect, key, val, 'copied') |
|
1054 | 1054 | v = src.config('web', 'cacerts') |
|
1055 | 1055 | if v: |
|
1056 | 1056 | dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') |
|
1057 | 1057 | |
|
1058 | 1058 | return dst |
|
1059 | 1059 | |
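A sketch of remoteui() in use: seeding it from a repo drops repo-specific configuration while the ssh-related options survive (repo and the option values are hypothetical):

    dst = hg.remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': None})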
|
1060 | 1060 | # Files of interest |
|
1061 | 1061 | # Used to check if the repository has changed looking at mtime and size of |
|
1062 | 1062 | # these files. |
|
1063 | 1063 | foi = [('spath', '00changelog.i'), |
|
1064 | 1064 | ('spath', 'phaseroots'), # ! phase can change content at the same size |
|
1065 | 1065 | ('spath', 'obsstore'), |
|
1066 | 1066 | ('path', 'bookmarks'), # ! bookmark can change content at the same size |
|
1067 | 1067 | ] |
|
1068 | 1068 | |
|
1069 | 1069 | class cachedlocalrepo(object): |
|
1070 | 1070 | """Holds a localrepository that can be cached and reused.""" |
|
1071 | 1071 | |
|
1072 | 1072 | def __init__(self, repo): |
|
1073 | 1073 | """Create a new cached repo from an existing repo. |
|
1074 | 1074 | |
|
1075 | 1075 | We assume the passed in repo was recently created. If the |
|
1076 | 1076 | repo has changed between when it was created and when it was |
|
1077 | 1077 | turned into a cache, it may not refresh properly. |
|
1078 | 1078 | """ |
|
1079 | 1079 | assert isinstance(repo, localrepo.localrepository) |
|
1080 | 1080 | self._repo = repo |
|
1081 | 1081 | self._state, self.mtime = self._repostate() |
|
1082 | 1082 | self._filtername = repo.filtername |
|
1083 | 1083 | |
|
1084 | 1084 | def fetch(self): |
|
1085 | 1085 | """Refresh (if necessary) and return a repository. |
|
1086 | 1086 | |
|
1087 | 1087 | If the cached instance is out of date, it will be recreated |
|
1088 | 1088 | automatically and returned. |
|
1089 | 1089 | |
|
1090 | 1090 | Returns a tuple of the repo and a boolean indicating whether a new |
|
1091 | 1091 | repo instance was created. |
|
1092 | 1092 | """ |
|
1093 | 1093 | # We compare the mtimes and sizes of some well-known files to |
|
1094 | 1094 | # determine if the repo changed. This is not precise, as mtimes |
|
1095 | 1095 | # are susceptible to clock skew and imprecise filesystems and |
|
1096 | 1096 | # file content can change while maintaining the same size. |
|
1097 | 1097 | |
|
1098 | 1098 | state, mtime = self._repostate() |
|
1099 | 1099 | if state == self._state: |
|
1100 | 1100 | return self._repo, False |
|
1101 | 1101 | |
|
1102 | 1102 | repo = repository(self._repo.baseui, self._repo.url()) |
|
1103 | 1103 | if self._filtername: |
|
1104 | 1104 | self._repo = repo.filtered(self._filtername) |
|
1105 | 1105 | else: |
|
1106 | 1106 | self._repo = repo.unfiltered() |
|
1107 | 1107 | self._state = state |
|
1108 | 1108 | self.mtime = mtime |
|
1109 | 1109 | |
|
1110 | 1110 | return self._repo, True |
|
1111 | 1111 | |
|
1112 | 1112 | def _repostate(self): |
|
1113 | 1113 | state = [] |
|
1114 | 1114 | maxmtime = -1 |
|
1115 | 1115 | for attr, fname in foi: |
|
1116 | 1116 | prefix = getattr(self._repo, attr) |
|
1117 | 1117 | p = os.path.join(prefix, fname) |
|
1118 | 1118 | try: |
|
1119 | 1119 | st = os.stat(p) |
|
1120 | 1120 | except OSError: |
|
1121 | 1121 | st = os.stat(prefix) |
|
1122 | 1122 | state.append((st[stat.ST_MTIME], st.st_size)) |
|
1123 | 1123 | maxmtime = max(maxmtime, st[stat.ST_MTIME]) |
|
1124 | 1124 | |
|
1125 | 1125 | return tuple(state), maxmtime |
|
1126 | 1126 | |
|
1127 | 1127 | def copy(self): |
|
1128 | 1128 | """Obtain a copy of this class instance. |
|
1129 | 1129 | |
|
1130 | 1130 | A new localrepository instance is obtained. The new instance should be |
|
1131 | 1131 | completely independent of the original. |
|
1132 | 1132 | """ |
|
1133 | 1133 | repo = repository(self._repo.baseui, self._repo.origroot) |
|
1134 | 1134 | if self._filtername: |
|
1135 | 1135 | repo = repo.filtered(self._filtername) |
|
1136 | 1136 | else: |
|
1137 | 1137 | repo = repo.unfiltered() |
|
1138 | 1138 | c = cachedlocalrepo(repo) |
|
1139 | 1139 | c._state = self._state |
|
1140 | 1140 | c.mtime = self.mtime |
|
1141 | 1141 | return c |
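Finally, a sketch of the intended cachedlocalrepo usage in a long-running process: create it once, call fetch() per request, and let the mtime/size fingerprint above decide whether a reload is needed (the path is hypothetical):

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()                    # assumed ui bootstrap
    cached = hg.cachedlocalrepo(hg.repository(u, '/tmp/work'))
    repo, created = cached.fetch()         # created: was a new repo made?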