@@ -1,139 +1,66 @@
 # extutil.py - useful utility methods for extensions
 #
 # Copyright 2016 Facebook
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import contextlib
 import errno
 import os
-import subprocess
 import time

 from mercurial import (
     error,
     lock as lockmod,
-    pycompat,
     util,
     vfs as vfsmod,
 )

-if pycompat.iswindows:
-    # no fork on Windows, but we can create a detached process
-    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
-    # No stdlib constant exists for this value
-    DETACHED_PROCESS = 0x00000008
-    _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
-
-    def runbgcommand(script, env, shell=False, stdout=None, stderr=None):
-        '''Spawn a command without waiting for it to finish.'''
-        # we can't use close_fds *and* redirect stdin. I'm not sure that we
-        # need to because the detached process has no console connection.
-        subprocess.Popen(
-            script, shell=shell, env=env, close_fds=True,
-            creationflags=_creationflags, stdout=stdout, stderr=stderr)
-else:
-    def runbgcommand(cmd, env, shell=False, stdout=None, stderr=None):
-        '''Spawn a command without waiting for it to finish.'''
-        # double-fork to completely detach from the parent process
-        # based on http://code.activestate.com/recipes/278731
-        pid = os.fork()
-        if pid:
-            # Parent process
-            (_pid, status) = os.waitpid(pid, 0)
-            if os.WIFEXITED(status):
-                returncode = os.WEXITSTATUS(status)
-            else:
-                returncode = -os.WTERMSIG(status)
-            if returncode != 0:
-                # The child process's return code is 0 on success, an errno
-                # value on failure, or 255 if we don't have a valid errno
-                # value.
-                #
-                # (It would be slightly nicer to return the full exception info
-                # over a pipe as the subprocess module does. For now it
-                # doesn't seem worth adding that complexity here, though.)
-                if returncode == 255:
-                    returncode = errno.EINVAL
-                raise OSError(returncode, 'error running %r: %s' %
-                              (cmd, os.strerror(returncode)))
-            return
-
-        returncode = 255
-        try:
-            # Start a new session
-            os.setsid()
-
-            stdin = open(os.devnull, 'r')
-            if stdout is None:
-                stdout = open(os.devnull, 'w')
-            if stderr is None:
-                stderr = open(os.devnull, 'w')
-
-            # connect stdin to devnull to make sure the subprocess can't
-            # muck up that stream for mercurial.
-            subprocess.Popen(
-                cmd, shell=shell, env=env, close_fds=True,
-                stdin=stdin, stdout=stdout, stderr=stderr)
-            returncode = 0
-        except EnvironmentError as ex:
-            returncode = (ex.errno & 0xff)
-            if returncode == 0:
-                # This shouldn't happen, but just in case make sure the
-                # return code is never 0 here.
-                returncode = 255
-        except Exception:
-            returncode = 255
-        finally:
-            # mission accomplished, this child needs to exit and not
-            # continue the hg process here.
-            os._exit(returncode)
-
 @contextlib.contextmanager
 def flock(lockpath, description, timeout=-1):
     """A flock based lock object. Currently it is always non-blocking.

     Note that since it is flock based, you can accidentally take it multiple
     times within one process and the first one to be released will release all
     of them. So the caller needs to be careful to not create more than one
     instance per lock.
     """

     # best effort lightweight lock
     try:
         import fcntl
         fcntl.flock
     except ImportError:
         # fallback to Mercurial lock
         vfs = vfsmod.vfs(os.path.dirname(lockpath))
         with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
             yield
         return
     # make sure lock file exists
     util.makedirs(os.path.dirname(lockpath))
     with open(lockpath, 'a'):
         pass
     lockfd = os.open(lockpath, os.O_RDONLY, 0o664)
     start = time.time()
     while True:
         try:
             fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
             break
         except IOError as ex:
             if ex.errno == errno.EAGAIN:
                 if timeout != -1 and time.time() - start > timeout:
                     raise error.LockHeld(errno.EAGAIN, lockpath, description,
                                          '')
                 else:
                     time.sleep(0.05)
                     continue
             raise

     try:
         yield
     finally:
         fcntl.flock(lockfd, fcntl.LOCK_UN)
         os.close(lockfd)
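
The `flock` context manager kept above is the primitive that `repacker.run`
(in the second diff below) uses to keep two repacks from running at once. A
minimal usage sketch; the import location, lock path, description, and the
`do_repack_work` helper are invented for illustration:

    from hgext.remotefilelog import extutil   # assumed import location

    # timeout=0 makes a held lock fail fast with error.LockHeld instead of
    # waiting; per the docstring, keep at most one flock() per lock path
    # within a process, since releasing one releases them all.
    with extutil.flock('/repo/.hg/store/repacklock', 'repacking /repo',
                       timeout=0):
        do_repack_work()   # hypothetical critical section
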
@@ -1,784 +1,784 @@
 from __future__ import absolute_import

 import os
 import time

 from mercurial.i18n import _
 from mercurial.node import (
     nullid,
     short,
 )
 from mercurial import (
     encoding,
     error,
     mdiff,
     policy,
     pycompat,
     scmutil,
     util,
     vfs,
 )
 from mercurial.utils import procutil
 from . import (
     constants,
     contentstore,
     datapack,
     extutil,
     historypack,
     metadatastore,
     shallowutil,
 )

 osutil = policy.importmod(r'osutil')

 class RepackAlreadyRunning(error.Abort):
     pass

 if util.safehasattr(util, '_hgexecutable'):
     # Before 5be286db
     _hgexecutable = util.hgexecutable
 else:
     from mercurial.utils import procutil
     _hgexecutable = procutil.hgexecutable

 def backgroundrepack(repo, incremental=True, packsonly=False):
     cmd = [_hgexecutable(), '-R', repo.origroot, 'repack']
     msg = _("(running background repack)\n")
     if incremental:
         cmd.append('--incremental')
         msg = _("(running background incremental repack)\n")
     if packsonly:
         cmd.append('--packsonly')
     repo.ui.warn(msg)
-    extutil.runbgcommand(cmd, encoding.environ)
+    procutil.runbgcommand(cmd, encoding.environ)

|
55 | def fullrepack(repo, options=None): | |
56 | """If ``packsonly`` is True, stores creating only loose objects are skipped. |
|
56 | """If ``packsonly`` is True, stores creating only loose objects are skipped. | |
57 | """ |
|
57 | """ | |
58 | if util.safehasattr(repo, 'shareddatastores'): |
|
58 | if util.safehasattr(repo, 'shareddatastores'): | |
59 | datasource = contentstore.unioncontentstore( |
|
59 | datasource = contentstore.unioncontentstore( | |
60 | *repo.shareddatastores) |
|
60 | *repo.shareddatastores) | |
61 | historysource = metadatastore.unionmetadatastore( |
|
61 | historysource = metadatastore.unionmetadatastore( | |
62 | *repo.sharedhistorystores, |
|
62 | *repo.sharedhistorystores, | |
63 | allowincomplete=True) |
|
63 | allowincomplete=True) | |
64 |
|
64 | |||
65 | packpath = shallowutil.getcachepackpath( |
|
65 | packpath = shallowutil.getcachepackpath( | |
66 | repo, |
|
66 | repo, | |
67 | constants.FILEPACK_CATEGORY) |
|
67 | constants.FILEPACK_CATEGORY) | |
68 | _runrepack(repo, datasource, historysource, packpath, |
|
68 | _runrepack(repo, datasource, historysource, packpath, | |
69 | constants.FILEPACK_CATEGORY, options=options) |
|
69 | constants.FILEPACK_CATEGORY, options=options) | |
70 |
|
70 | |||
71 | if util.safehasattr(repo.manifestlog, 'datastore'): |
|
71 | if util.safehasattr(repo.manifestlog, 'datastore'): | |
72 | localdata, shareddata = _getmanifeststores(repo) |
|
72 | localdata, shareddata = _getmanifeststores(repo) | |
73 | lpackpath, ldstores, lhstores = localdata |
|
73 | lpackpath, ldstores, lhstores = localdata | |
74 | spackpath, sdstores, shstores = shareddata |
|
74 | spackpath, sdstores, shstores = shareddata | |
75 |
|
75 | |||
76 | # Repack the shared manifest store |
|
76 | # Repack the shared manifest store | |
77 | datasource = contentstore.unioncontentstore(*sdstores) |
|
77 | datasource = contentstore.unioncontentstore(*sdstores) | |
78 | historysource = metadatastore.unionmetadatastore( |
|
78 | historysource = metadatastore.unionmetadatastore( | |
79 | *shstores, |
|
79 | *shstores, | |
80 | allowincomplete=True) |
|
80 | allowincomplete=True) | |
81 | _runrepack(repo, datasource, historysource, spackpath, |
|
81 | _runrepack(repo, datasource, historysource, spackpath, | |
82 | constants.TREEPACK_CATEGORY, options=options) |
|
82 | constants.TREEPACK_CATEGORY, options=options) | |
83 |
|
83 | |||
84 | # Repack the local manifest store |
|
84 | # Repack the local manifest store | |
85 | datasource = contentstore.unioncontentstore( |
|
85 | datasource = contentstore.unioncontentstore( | |
86 | *ldstores, |
|
86 | *ldstores, | |
87 | allowincomplete=True) |
|
87 | allowincomplete=True) | |
88 | historysource = metadatastore.unionmetadatastore( |
|
88 | historysource = metadatastore.unionmetadatastore( | |
89 | *lhstores, |
|
89 | *lhstores, | |
90 | allowincomplete=True) |
|
90 | allowincomplete=True) | |
91 | _runrepack(repo, datasource, historysource, lpackpath, |
|
91 | _runrepack(repo, datasource, historysource, lpackpath, | |
92 | constants.TREEPACK_CATEGORY, options=options) |
|
92 | constants.TREEPACK_CATEGORY, options=options) | |
93 |
|
93 | |||
94 | def incrementalrepack(repo, options=None): |
|
94 | def incrementalrepack(repo, options=None): | |
95 | """This repacks the repo by looking at the distribution of pack files in the |
|
95 | """This repacks the repo by looking at the distribution of pack files in the | |
96 | repo and performing the most minimal repack to keep the repo in good shape. |
|
96 | repo and performing the most minimal repack to keep the repo in good shape. | |
97 | """ |
|
97 | """ | |
98 | if util.safehasattr(repo, 'shareddatastores'): |
|
98 | if util.safehasattr(repo, 'shareddatastores'): | |
99 | packpath = shallowutil.getcachepackpath( |
|
99 | packpath = shallowutil.getcachepackpath( | |
100 | repo, |
|
100 | repo, | |
101 | constants.FILEPACK_CATEGORY) |
|
101 | constants.FILEPACK_CATEGORY) | |
102 | _incrementalrepack(repo, |
|
102 | _incrementalrepack(repo, | |
103 | repo.shareddatastores, |
|
103 | repo.shareddatastores, | |
104 | repo.sharedhistorystores, |
|
104 | repo.sharedhistorystores, | |
105 | packpath, |
|
105 | packpath, | |
106 | constants.FILEPACK_CATEGORY, |
|
106 | constants.FILEPACK_CATEGORY, | |
107 | options=options) |
|
107 | options=options) | |
108 |
|
108 | |||
109 | if util.safehasattr(repo.manifestlog, 'datastore'): |
|
109 | if util.safehasattr(repo.manifestlog, 'datastore'): | |
110 | localdata, shareddata = _getmanifeststores(repo) |
|
110 | localdata, shareddata = _getmanifeststores(repo) | |
111 | lpackpath, ldstores, lhstores = localdata |
|
111 | lpackpath, ldstores, lhstores = localdata | |
112 | spackpath, sdstores, shstores = shareddata |
|
112 | spackpath, sdstores, shstores = shareddata | |
113 |
|
113 | |||
114 | # Repack the shared manifest store |
|
114 | # Repack the shared manifest store | |
115 | _incrementalrepack(repo, |
|
115 | _incrementalrepack(repo, | |
116 | sdstores, |
|
116 | sdstores, | |
117 | shstores, |
|
117 | shstores, | |
118 | spackpath, |
|
118 | spackpath, | |
119 | constants.TREEPACK_CATEGORY, |
|
119 | constants.TREEPACK_CATEGORY, | |
120 | options=options) |
|
120 | options=options) | |
121 |
|
121 | |||
122 | # Repack the local manifest store |
|
122 | # Repack the local manifest store | |
123 | _incrementalrepack(repo, |
|
123 | _incrementalrepack(repo, | |
124 | ldstores, |
|
124 | ldstores, | |
125 | lhstores, |
|
125 | lhstores, | |
126 | lpackpath, |
|
126 | lpackpath, | |
127 | constants.TREEPACK_CATEGORY, |
|
127 | constants.TREEPACK_CATEGORY, | |
128 | allowincompletedata=True, |
|
128 | allowincompletedata=True, | |
129 | options=options) |
|
129 | options=options) | |
130 |
|
130 | |||
131 | def _getmanifeststores(repo): |
|
131 | def _getmanifeststores(repo): | |
132 | shareddatastores = repo.manifestlog.shareddatastores |
|
132 | shareddatastores = repo.manifestlog.shareddatastores | |
133 | localdatastores = repo.manifestlog.localdatastores |
|
133 | localdatastores = repo.manifestlog.localdatastores | |
134 | sharedhistorystores = repo.manifestlog.sharedhistorystores |
|
134 | sharedhistorystores = repo.manifestlog.sharedhistorystores | |
135 | localhistorystores = repo.manifestlog.localhistorystores |
|
135 | localhistorystores = repo.manifestlog.localhistorystores | |
136 |
|
136 | |||
137 | sharedpackpath = shallowutil.getcachepackpath(repo, |
|
137 | sharedpackpath = shallowutil.getcachepackpath(repo, | |
138 | constants.TREEPACK_CATEGORY) |
|
138 | constants.TREEPACK_CATEGORY) | |
139 | localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base, |
|
139 | localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base, | |
140 | constants.TREEPACK_CATEGORY) |
|
140 | constants.TREEPACK_CATEGORY) | |
141 |
|
141 | |||
142 | return ((localpackpath, localdatastores, localhistorystores), |
|
142 | return ((localpackpath, localdatastores, localhistorystores), | |
143 | (sharedpackpath, shareddatastores, sharedhistorystores)) |
|
143 | (sharedpackpath, shareddatastores, sharedhistorystores)) | |
144 |
|
144 | |||
145 | def _topacks(packpath, files, constructor): |
|
145 | def _topacks(packpath, files, constructor): | |
146 | paths = list(os.path.join(packpath, p) for p in files) |
|
146 | paths = list(os.path.join(packpath, p) for p in files) | |
147 | packs = list(constructor(p) for p in paths) |
|
147 | packs = list(constructor(p) for p in paths) | |
148 | return packs |
|
148 | return packs | |
149 |
|
149 | |||
 def _deletebigpacks(repo, folder, files):
     """Deletes packfiles that are bigger than ``packs.maxpacksize``.

     Returns ``files`` with the removed files omitted."""
     maxsize = repo.ui.configbytes("packs", "maxpacksize")
     if maxsize <= 0:
         return files

     # This only considers datapacks today, but we could broaden it to include
     # historypacks.
     VALIDEXTS = [".datapack", ".dataidx"]

     # Either an oversize index or datapack will trigger cleanup of the whole
     # pack:
     oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files
         if (stat.st_size > maxsize and (os.path.splitext(path)[1]
                                         in VALIDEXTS))])

     for rootfname in oversized:
         rootpath = os.path.join(folder, rootfname)
         for ext in VALIDEXTS:
             path = rootpath + ext
             repo.ui.debug('removing oversize packfile %s (%s)\n' %
                           (path, util.bytecount(os.stat(path).st_size)))
             os.unlink(path)
     return [row for row in files if os.path.basename(row[0]) not in oversized]

@@ -177,149 +177,149 @@
 def _incrementalrepack(repo, datastore, historystore, packpath, category,
         allowincompletedata=False, options=None):
     shallowutil.mkstickygroupdir(repo.ui, packpath)

     files = osutil.listdir(packpath, stat=True)
     files = _deletebigpacks(repo, packpath, files)
     datapacks = _topacks(packpath,
                          _computeincrementaldatapack(repo.ui, files),
                          datapack.datapack)
     datapacks.extend(s for s in datastore
                      if not isinstance(s, datapack.datapackstore))

     historypacks = _topacks(packpath,
                             _computeincrementalhistorypack(repo.ui, files),
                             historypack.historypack)
     historypacks.extend(s for s in historystore
                         if not isinstance(s, historypack.historypackstore))

     # ``allhistory{files,packs}`` contains all known history packs, even ones we
     # don't plan to repack. They are used during the datapack repack to ensure
     # good ordering of nodes.
     allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
                             historypack.INDEXSUFFIX)
     allhistorypacks = _topacks(packpath,
         (f for f, mode, stat in allhistoryfiles),
         historypack.historypack)
     allhistorypacks.extend(s for s in historystore
                            if not isinstance(s, historypack.historypackstore))
     _runrepack(repo,
                contentstore.unioncontentstore(
                    *datapacks,
                    allowincomplete=allowincompletedata),
                metadatastore.unionmetadatastore(
                    *historypacks,
                    allowincomplete=True),
                packpath, category,
                fullhistory=metadatastore.unionmetadatastore(
                    *allhistorypacks,
                    allowincomplete=True),
                options=options)

 def _computeincrementaldatapack(ui, files):
     opts = {
         'gencountlimit' : ui.configint(
             'remotefilelog', 'data.gencountlimit'),
         'generations' : ui.configlist(
             'remotefilelog', 'data.generations'),
         'maxrepackpacks' : ui.configint(
             'remotefilelog', 'data.maxrepackpacks'),
         'repackmaxpacksize' : ui.configbytes(
             'remotefilelog', 'data.repackmaxpacksize'),
         'repacksizelimit' : ui.configbytes(
             'remotefilelog', 'data.repacksizelimit'),
     }

     packfiles = _allpackfileswithsuffix(
         files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
     return _computeincrementalpack(packfiles, opts)

 def _computeincrementalhistorypack(ui, files):
     opts = {
         'gencountlimit' : ui.configint(
             'remotefilelog', 'history.gencountlimit'),
         'generations' : ui.configlist(
             'remotefilelog', 'history.generations', ['100MB']),
         'maxrepackpacks' : ui.configint(
             'remotefilelog', 'history.maxrepackpacks'),
         'repackmaxpacksize' : ui.configbytes(
             'remotefilelog', 'history.repackmaxpacksize', '400MB'),
         'repacksizelimit' : ui.configbytes(
             'remotefilelog', 'history.repacksizelimit'),
     }

     packfiles = _allpackfileswithsuffix(
         files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
     return _computeincrementalpack(packfiles, opts)

 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
     result = []
     fileset = set(fn for fn, mode, stat in files)
     for filename, mode, stat in files:
         if not filename.endswith(packsuffix):
             continue

         prefix = filename[:-len(packsuffix)]

         # Don't process a pack if it doesn't have an index.
         if (prefix + indexsuffix) not in fileset:
             continue
         result.append((prefix, mode, stat))

     return result

 def _computeincrementalpack(files, opts):
     """Given a set of pack files along with the configuration options, this
     function computes the list of files that should be packed as part of an
     incremental repack.

     It tries to strike a balance between keeping incremental repacks cheap (i.e.
     packing small things when possible, and rolling the packs up to the big ones
     over time).
     """

     limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
                   reverse=True))
     limits.append(0)

     # Group the packs by generation (i.e. by size)
     generations = []
     for i in pycompat.xrange(len(limits)):
         generations.append([])

     sizes = {}
     for prefix, mode, stat in files:
         size = stat.st_size
         if size > opts['repackmaxpacksize']:
             continue

         sizes[prefix] = size
         for i, limit in enumerate(limits):
             if size > limit:
                 generations[i].append(prefix)
                 break

     # Steps for picking what packs to repack:
     # 1. Pick the largest generation with > gencountlimit pack files.
     # 2. Take the smallest three packs.
     # 3. While total-size-of-packs < repacksizelimit: add another pack

     # Find the largest generation with more than gencountlimit packs
     genpacks = []
     for i, limit in enumerate(limits):
         if len(generations[i]) > opts['gencountlimit']:
             # Sort to be smallest last, for easy popping later
             genpacks.extend(sorted(generations[i], reverse=True,
                                    key=lambda x: sizes[x]))
             break

     # Take as many packs from the generation as we can
     chosenpacks = genpacks[-3:]
     genpacks = genpacks[:-3]
     repacksize = sum(sizes[n] for n in chosenpacks)
     while (repacksize < opts['repacksizelimit'] and genpacks and
            len(chosenpacks) < opts['maxrepackpacks']):
         chosenpacks.append(genpacks.pop())
         repacksize += sizes[chosenpacks[-1]]

     return chosenpacks

@@ -326,33 +326,33 @@
 def _runrepack(repo, data, history, packpath, category, fullhistory=None,
                options=None):
     shallowutil.mkstickygroupdir(repo.ui, packpath)

     def isold(repo, filename, node):
         """Check if the file node is older than a limit.
         Unless a limit is specified in the config the default limit is taken.
         """
         filectx = repo.filectx(filename, fileid=node)
         filetime = repo[filectx.linkrev()].date()

         ttl = repo.ui.configint('remotefilelog', 'nodettl')

         limit = time.time() - ttl
         return filetime[0] < limit

     garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
     if not fullhistory:
         fullhistory = history
     packer = repacker(repo, data, history, fullhistory, category,
                       gc=garbagecollect, isold=isold, options=options)

     # internal config: remotefilelog.datapackversion
     dv = repo.ui.configint('remotefilelog', 'datapackversion', 0)

     with datapack.mutabledatapack(repo.ui, packpath, version=dv) as dpack:
         with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
             try:
                 packer.run(dpack, hpack)
             except error.LockHeld:
                 raise RepackAlreadyRunning(_("skipping repack - another repack "
                                              "is already running"))

|
359 | def keepset(repo, keyfn, lastkeepkeys=None): | |
360 | """Computes a keepset which is not garbage collected. |
|
360 | """Computes a keepset which is not garbage collected. | |
361 | 'keyfn' is a function that maps filename, node to a unique key. |
|
361 | 'keyfn' is a function that maps filename, node to a unique key. | |
362 | 'lastkeepkeys' is an optional argument and if provided the keepset |
|
362 | 'lastkeepkeys' is an optional argument and if provided the keepset | |
363 | function updates lastkeepkeys with more keys and returns the result. |
|
363 | function updates lastkeepkeys with more keys and returns the result. | |
364 | """ |
|
364 | """ | |
365 | if not lastkeepkeys: |
|
365 | if not lastkeepkeys: | |
366 | keepkeys = set() |
|
366 | keepkeys = set() | |
367 | else: |
|
367 | else: | |
368 | keepkeys = lastkeepkeys |
|
368 | keepkeys = lastkeepkeys | |
369 |
|
369 | |||
370 | # We want to keep: |
|
370 | # We want to keep: | |
371 | # 1. Working copy parent |
|
371 | # 1. Working copy parent | |
372 | # 2. Draft commits |
|
372 | # 2. Draft commits | |
373 | # 3. Parents of draft commits |
|
373 | # 3. Parents of draft commits | |
374 | # 4. Pullprefetch and bgprefetchrevs revsets if specified |
|
374 | # 4. Pullprefetch and bgprefetchrevs revsets if specified | |
375 | revs = ['.', 'draft()', 'parents(draft())'] |
|
375 | revs = ['.', 'draft()', 'parents(draft())'] | |
376 | prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None) |
|
376 | prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None) | |
377 | if prefetchrevs: |
|
377 | if prefetchrevs: | |
378 | revs.append('(%s)' % prefetchrevs) |
|
378 | revs.append('(%s)' % prefetchrevs) | |
379 | prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None) |
|
379 | prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None) | |
380 | if prefetchrevs: |
|
380 | if prefetchrevs: | |
381 | revs.append('(%s)' % prefetchrevs) |
|
381 | revs.append('(%s)' % prefetchrevs) | |
382 | revs = '+'.join(revs) |
|
382 | revs = '+'.join(revs) | |
383 |
|
383 | |||
384 | revs = ['sort((%s), "topo")' % revs] |
|
384 | revs = ['sort((%s), "topo")' % revs] | |
385 | keep = scmutil.revrange(repo, revs) |
|
385 | keep = scmutil.revrange(repo, revs) | |
386 |
|
386 | |||
387 | processed = set() |
|
387 | processed = set() | |
388 | lastmanifest = None |
|
388 | lastmanifest = None | |
389 |
|
389 | |||
390 | # process the commits in toposorted order starting from the oldest |
|
390 | # process the commits in toposorted order starting from the oldest | |
391 | for r in reversed(keep._list): |
|
391 | for r in reversed(keep._list): | |
392 | if repo[r].p1().rev() in processed: |
|
392 | if repo[r].p1().rev() in processed: | |
393 | # if the direct parent has already been processed |
|
393 | # if the direct parent has already been processed | |
394 | # then we only need to process the delta |
|
394 | # then we only need to process the delta | |
395 | m = repo[r].manifestctx().readdelta() |
|
395 | m = repo[r].manifestctx().readdelta() | |
396 | else: |
|
396 | else: | |
397 | # otherwise take the manifest and diff it |
|
397 | # otherwise take the manifest and diff it | |
398 | # with the previous manifest if one exists |
|
398 | # with the previous manifest if one exists | |
399 | if lastmanifest: |
|
399 | if lastmanifest: | |
400 | m = repo[r].manifest().diff(lastmanifest) |
|
400 | m = repo[r].manifest().diff(lastmanifest) | |
401 | else: |
|
401 | else: | |
402 | m = repo[r].manifest() |
|
402 | m = repo[r].manifest() | |
403 | lastmanifest = repo[r].manifest() |
|
403 | lastmanifest = repo[r].manifest() | |
404 | processed.add(r) |
|
404 | processed.add(r) | |
405 |
|
405 | |||
406 | # populate keepkeys with keys from the current manifest |
|
406 | # populate keepkeys with keys from the current manifest | |
407 | if type(m) is dict: |
|
407 | if type(m) is dict: | |
408 | # m is a result of diff of two manifests and is a dictionary that |
|
408 | # m is a result of diff of two manifests and is a dictionary that | |
409 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple |
|
409 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple | |
410 | for filename, diff in m.iteritems(): |
|
410 | for filename, diff in m.iteritems(): | |
411 | if diff[0][0] is not None: |
|
411 | if diff[0][0] is not None: | |
412 | keepkeys.add(keyfn(filename, diff[0][0])) |
|
412 | keepkeys.add(keyfn(filename, diff[0][0])) | |
413 | else: |
|
413 | else: | |
414 | # m is a manifest object |
|
414 | # m is a manifest object | |
415 | for filename, filenode in m.iteritems(): |
|
415 | for filename, filenode in m.iteritems(): | |
416 | keepkeys.add(keyfn(filename, filenode)) |
|
416 | keepkeys.add(keyfn(filename, filenode)) | |
417 |
|
417 | |||
418 | return keepkeys |
|
418 | return keepkeys | |
419 |
|
419 | |||
@@ -420,365 +420,365 @@
 class repacker(object):
     """Class for orchestrating the repack of data and history information into a
     new format.
     """
     def __init__(self, repo, data, history, fullhistory, category, gc=False,
                  isold=None, options=None):
         self.repo = repo
         self.data = data
         self.history = history
         self.fullhistory = fullhistory
         self.unit = constants.getunits(category)
         self.garbagecollect = gc
         self.options = options
         if self.garbagecollect:
             if not isold:
                 raise ValueError("Function 'isold' is not properly specified")
             # use (filename, node) tuple as a keepset key
             self.keepkeys = keepset(repo, lambda f, n : (f, n))
             self.isold = isold

     def run(self, targetdata, targethistory):
         ledger = repackledger()

         with extutil.flock(repacklockvfs(self.repo).join("repacklock"),
                            _('repacking %s') % self.repo.origroot, timeout=0):
             self.repo.hook('prerepack')

             # Populate ledger from source
             self.data.markledger(ledger, options=self.options)
             self.history.markledger(ledger, options=self.options)

             # Run repack
             self.repackdata(ledger, targetdata)
             self.repackhistory(ledger, targethistory)

             # Call cleanup on each source
             for source in ledger.sources:
                 source.cleanup(ledger)

     def _chainorphans(self, ui, filename, nodes, orphans, deltabases):
         """Reorders ``orphans`` into a single chain inside ``nodes`` and
         ``deltabases``.

         We often have orphan entries (nodes without a base that aren't
         referenced by other nodes -- i.e., part of a chain) due to gaps in
         history. Rather than store them as individual fulltexts, we prefer to
         insert them as one chain sorted by size.
         """
         if not orphans:
             return nodes

         def getsize(node, default=0):
             meta = self.data.getmeta(filename, node)
             if constants.METAKEYSIZE in meta:
                 return meta[constants.METAKEYSIZE]
             else:
                 return default

         # Sort orphans by size; biggest first is preferred, since it's more
         # likely to be the newest version assuming files grow over time.
         # (Sort by node first to ensure the sort is stable.)
         orphans = sorted(orphans)
         orphans = list(sorted(orphans, key=getsize, reverse=True))
         if ui.debugflag:
             ui.debug("%s: orphan chain: %s\n" % (filename,
                 ", ".join([short(s) for s in orphans])))

         # Create one contiguous chain and reassign deltabases.
         for i, node in enumerate(orphans):
             if i == 0:
                 deltabases[node] = (nullid, 0)
             else:
                 parent = orphans[i - 1]
                 deltabases[node] = (parent, deltabases[parent][1] + 1)
         nodes = filter(lambda node: node not in orphans, nodes)
         nodes += orphans
         return nodes

498 | def repackdata(self, ledger, target): |
|
498 | def repackdata(self, ledger, target): | |
499 | ui = self.repo.ui |
|
499 | ui = self.repo.ui | |
500 | maxchainlen = ui.configint('packs', 'maxchainlen', 1000) |
|
500 | maxchainlen = ui.configint('packs', 'maxchainlen', 1000) | |
501 |
|
501 | |||
502 | byfile = {} |
|
502 | byfile = {} | |
503 | for entry in ledger.entries.itervalues(): |
|
503 | for entry in ledger.entries.itervalues(): | |
504 | if entry.datasource: |
|
504 | if entry.datasource: | |
505 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
505 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
506 |
|
506 | |||
507 | count = 0 |
|
507 | count = 0 | |
508 | for filename, entries in sorted(byfile.iteritems()): |
|
508 | for filename, entries in sorted(byfile.iteritems()): | |
509 | ui.progress(_("repacking data"), count, unit=self.unit, |
|
509 | ui.progress(_("repacking data"), count, unit=self.unit, | |
510 | total=len(byfile)) |
|
510 | total=len(byfile)) | |
511 |
|
511 | |||
512 | ancestors = {} |
|
512 | ancestors = {} | |
513 | nodes = list(node for node in entries.iterkeys()) |
|
513 | nodes = list(node for node in entries.iterkeys()) | |
514 | nohistory = [] |
|
514 | nohistory = [] | |
515 | for i, node in enumerate(nodes): |
|
515 | for i, node in enumerate(nodes): | |
516 | if node in ancestors: |
|
516 | if node in ancestors: | |
517 | continue |
|
517 | continue | |
518 | ui.progress(_("building history"), i, unit='nodes', |
|
518 | ui.progress(_("building history"), i, unit='nodes', | |
519 | total=len(nodes)) |
|
519 | total=len(nodes)) | |
520 | try: |
|
520 | try: | |
521 | ancestors.update(self.fullhistory.getancestors(filename, |
|
521 | ancestors.update(self.fullhistory.getancestors(filename, | |
522 | node, known=ancestors)) |
|
522 | node, known=ancestors)) | |
523 | except KeyError: |
|
523 | except KeyError: | |
524 | # Since we're packing data entries, we may not have the |
|
524 | # Since we're packing data entries, we may not have the | |
525 | # corresponding history entries for them. It's not a big |
|
525 | # corresponding history entries for them. It's not a big | |
526 | # deal, but the entries won't be delta'd perfectly. |
|
526 | # deal, but the entries won't be delta'd perfectly. | |
527 | nohistory.append(node) |
|
527 | nohistory.append(node) | |
528 | ui.progress(_("building history"), None) |
|
528 | ui.progress(_("building history"), None) | |
529 |
|
529 | |||
530 | # Order the nodes children first, so we can produce reverse deltas |
|
530 | # Order the nodes children first, so we can produce reverse deltas | |
531 | orderednodes = list(reversed(self._toposort(ancestors))) |
|
531 | orderednodes = list(reversed(self._toposort(ancestors))) | |
532 | if len(nohistory) > 0: |
|
532 | if len(nohistory) > 0: | |
533 | ui.debug('repackdata: %d nodes without history\n' % |
|
533 | ui.debug('repackdata: %d nodes without history\n' % | |
534 | len(nohistory)) |
|
534 | len(nohistory)) | |
535 | orderednodes.extend(sorted(nohistory)) |
|
535 | orderednodes.extend(sorted(nohistory)) | |
536 |
|
536 | |||
537 | # Filter orderednodes to just the nodes we want to serialize (it |
|
537 | # Filter orderednodes to just the nodes we want to serialize (it | |
538 | # currently also has the edge nodes' ancestors). |
|
538 | # currently also has the edge nodes' ancestors). | |
539 | orderednodes = filter(lambda node: node in nodes, orderednodes) |
|
539 | orderednodes = filter(lambda node: node in nodes, orderednodes) | |
540 |
|
540 | |||
541 | # Garbage collect old nodes: |
|
541 | # Garbage collect old nodes: | |
542 | if self.garbagecollect: |
|
542 | if self.garbagecollect: | |
543 | neworderednodes = [] |
|
543 | neworderednodes = [] | |
544 | for node in orderednodes: |
|
544 | for node in orderednodes: | |
545 | # If the node is old and is not in the keepset, we skip it, |
|
545 | # If the node is old and is not in the keepset, we skip it, | |
546 | # and mark as garbage collected |
|
546 | # and mark as garbage collected | |
547 | if ((filename, node) not in self.keepkeys and |
|
547 | if ((filename, node) not in self.keepkeys and | |
548 | self.isold(self.repo, filename, node)): |
|
548 | self.isold(self.repo, filename, node)): | |
549 | entries[node].gced = True |
|
549 | entries[node].gced = True | |
550 | continue |
|
550 | continue | |
551 | neworderednodes.append(node) |
|
551 | neworderednodes.append(node) | |
552 | orderednodes = neworderednodes |
|
552 | orderednodes = neworderednodes | |
553 |
|
553 | |||
554 | # Compute delta bases for nodes: |
|
554 | # Compute delta bases for nodes: | |
555 | deltabases = {} |
|
555 | deltabases = {} | |
556 | nobase = set() |
|
556 | nobase = set() | |
557 | referenced = set() |
|
557 | referenced = set() | |
558 | nodes = set(nodes) |
|
558 | nodes = set(nodes) | |
559 | for i, node in enumerate(orderednodes): |
|
559 | for i, node in enumerate(orderednodes): | |
560 | ui.progress(_("processing nodes"), i, unit='nodes', |
|
560 | ui.progress(_("processing nodes"), i, unit='nodes', | |
561 | total=len(orderednodes)) |
|
561 | total=len(orderednodes)) | |
562 | # Find delta base |
|
562 | # Find delta base | |
563 | # TODO: allow delta'ing against most recent descendant instead |
|
563 | # TODO: allow delta'ing against most recent descendant instead | |
564 | # of immediate child |
|
564 | # of immediate child | |
565 | deltatuple = deltabases.get(node, None) |
|
565 | deltatuple = deltabases.get(node, None) | |
566 | if deltatuple is None: |
|
566 | if deltatuple is None: | |
567 | deltabase, chainlen = nullid, 0 |
|
567 | deltabase, chainlen = nullid, 0 | |
568 | deltabases[node] = (nullid, 0) |
|
568 | deltabases[node] = (nullid, 0) | |
569 | nobase.add(node) |
|
569 | nobase.add(node) | |
570 | else: |
|
570 | else: | |
571 | deltabase, chainlen = deltatuple |
|
571 | deltabase, chainlen = deltatuple | |
572 | referenced.add(deltabase) |
|
572 | referenced.add(deltabase) | |
573 |
|
573 | |||
574 | # Use available ancestor information to inform our delta choices |
|
574 | # Use available ancestor information to inform our delta choices | |
575 | ancestorinfo = ancestors.get(node) |
|
575 | ancestorinfo = ancestors.get(node) | |
576 | if ancestorinfo: |
|
576 | if ancestorinfo: | |
577 | p1, p2, linknode, copyfrom = ancestorinfo |
|
577 | p1, p2, linknode, copyfrom = ancestorinfo | |
578 |
|
578 | |||
579 | # The presence of copyfrom means we're at a point where the |
|
579 | # The presence of copyfrom means we're at a point where the | |
580 | # file was copied from elsewhere. So don't attempt to do any |
|
580 | # file was copied from elsewhere. So don't attempt to do any | |
581 | # deltas with the other file. |
|
581 | # deltas with the other file. | |
582 | if copyfrom: |
|
582 | if copyfrom: | |
583 | p1 = nullid |
|
583 | p1 = nullid | |
584 |
|
584 | |||
585 | if chainlen < maxchainlen: |
|
585 | if chainlen < maxchainlen: | |
586 | # Record this child as the delta base for its parents. |
|
586 | # Record this child as the delta base for its parents. | |
587 | # This may be non optimal, since the parents may have |
|
587 | # This may be non optimal, since the parents may have | |
588 | # many children, and this will only choose the last one. |
|
588 | # many children, and this will only choose the last one. | |
589 | # TODO: record all children and try all deltas to find |
|
589 | # TODO: record all children and try all deltas to find | |
590 | # best |
|
590 | # best | |
591 | if p1 != nullid: |
|
591 | if p1 != nullid: | |
592 | deltabases[p1] = (node, chainlen + 1) |
|
592 | deltabases[p1] = (node, chainlen + 1) | |
593 | if p2 != nullid: |
|
593 | if p2 != nullid: | |
594 | deltabases[p2] = (node, chainlen + 1) |
|
594 | deltabases[p2] = (node, chainlen + 1) | |
595 |
|
595 | |||
596 | # experimental config: repack.chainorphansbysize |
|
596 | # experimental config: repack.chainorphansbysize | |
597 | if ui.configbool('repack', 'chainorphansbysize'): |
|
597 | if ui.configbool('repack', 'chainorphansbysize'): | |
598 | orphans = nobase - referenced |
|
598 | orphans = nobase - referenced | |
599 | orderednodes = self._chainorphans(ui, filename, orderednodes, |
|
599 | orderednodes = self._chainorphans(ui, filename, orderednodes, | |
600 | orphans, deltabases) |
|
600 | orphans, deltabases) | |
601 |
|
601 | |||
602 | # Compute deltas and write to the pack |
|
602 | # Compute deltas and write to the pack | |
603 | for i, node in enumerate(orderednodes): |
|
603 | for i, node in enumerate(orderednodes): | |
604 | deltabase, chainlen = deltabases[node] |
|
604 | deltabase, chainlen = deltabases[node] | |
605 | # Compute delta |
|
605 | # Compute delta | |
606 | # TODO: Optimize the deltachain fetching. Since we're |
|
606 | # TODO: Optimize the deltachain fetching. Since we're | |
607 | # iterating over the different version of the file, we may |
|
607 | # iterating over the different version of the file, we may | |
608 | # be fetching the same deltachain over and over again. |
|
608 | # be fetching the same deltachain over and over again. | |
609 | meta = None |
|
609 | meta = None | |
610 | if deltabase != nullid: |
|
610 | if deltabase != nullid: | |
611 | deltaentry = self.data.getdelta(filename, node) |
|
611 | deltaentry = self.data.getdelta(filename, node) | |
612 | delta, deltabasename, origdeltabase, meta = deltaentry |
|
612 | delta, deltabasename, origdeltabase, meta = deltaentry | |
613 | size = meta.get(constants.METAKEYSIZE) |
|
613 | size = meta.get(constants.METAKEYSIZE) | |
614 | if (deltabasename != filename or origdeltabase != deltabase |
|
614 | if (deltabasename != filename or origdeltabase != deltabase | |
615 | or size is None): |
|
615 | or size is None): | |
616 | deltabasetext = self.data.get(filename, deltabase) |
|
616 | deltabasetext = self.data.get(filename, deltabase) | |
617 | original = self.data.get(filename, node) |
|
617 | original = self.data.get(filename, node) | |
618 | size = len(original) |
|
618 | size = len(original) | |
619 | delta = mdiff.textdiff(deltabasetext, original) |
|
619 | delta = mdiff.textdiff(deltabasetext, original) | |
620 | else: |
|
620 | else: | |
621 | delta = self.data.get(filename, node) |
|
621 | delta = self.data.get(filename, node) | |
622 | size = len(delta) |
|
622 | size = len(delta) | |
623 | meta = self.data.getmeta(filename, node) |
|
623 | meta = self.data.getmeta(filename, node) | |
624 |
|
624 | |||
625 | # TODO: don't use the delta if it's larger than the fulltext |
|
625 | # TODO: don't use the delta if it's larger than the fulltext | |
626 | if constants.METAKEYSIZE not in meta: |
|
626 | if constants.METAKEYSIZE not in meta: | |
627 | meta[constants.METAKEYSIZE] = size |
|
627 | meta[constants.METAKEYSIZE] = size | |
628 | target.add(filename, node, deltabase, delta, meta) |
|
628 | target.add(filename, node, deltabase, delta, meta) | |
629 |
|
629 | |||
630 | entries[node].datarepacked = True |
|
630 | entries[node].datarepacked = True | |
631 |
|
631 | |||
632 | ui.progress(_("processing nodes"), None) |
|
632 | ui.progress(_("processing nodes"), None) | |
633 | count += 1 |
|
633 | count += 1 | |
634 |
|
634 | |||
635 | ui.progress(_("repacking data"), None) |
|
635 | ui.progress(_("repacking data"), None) | |
636 | target.close(ledger=ledger) |
|
636 | target.close(ledger=ledger) | |
637 |
|
637 | |||
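
A note on the bookkeeping above: while the delta chain stays under
maxchainlen, each child records itself as the suggested delta base for its
parents, so a parent visited later is deltified against one of its children.
A minimal stand-alone sketch of that walk, with invented nodes and an
invented maxchainlen:

    nullid = b'\0' * 20
    maxchainlen = 3
    # Children-first order; each tuple is (node, p1, p2). All values invented.
    orderednodes = [
        (b'c' * 20, b'b' * 20, nullid),
        (b'b' * 20, b'a' * 20, nullid),
        (b'a' * 20, nullid, nullid),
    ]
    deltabases = {}
    for node, p1, p2 in orderednodes:
        deltabase, chainlen = deltabases.get(node, (nullid, 0))
        if chainlen < maxchainlen:
            # The last child to claim a parent wins, as the TODO notes.
            if p1 != nullid:
                deltabases[p1] = (node, chainlen + 1)
            if p2 != nullid:
                deltabases[p2] = (node, chainlen + 1)
    # deltabases now maps b'b'*20 -> (b'c'*20, 1) and b'a'*20 -> (b'b'*20, 2),
    # so each parent is stored as a delta against its child.
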
638 | def repackhistory(self, ledger, target): |
|
638 | def repackhistory(self, ledger, target): | |
639 | ui = self.repo.ui |
|
639 | ui = self.repo.ui | |
640 |
|
640 | |||
641 | byfile = {} |
|
641 | byfile = {} | |
642 | for entry in ledger.entries.itervalues(): |
|
642 | for entry in ledger.entries.itervalues(): | |
643 | if entry.historysource: |
|
643 | if entry.historysource: | |
644 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
644 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
645 |
|
645 | |||
646 | count = 0 |
|
646 | count = 0 | |
647 | for filename, entries in sorted(byfile.iteritems()): |
|
647 | for filename, entries in sorted(byfile.iteritems()): | |
648 | ancestors = {} |
|
648 | ancestors = {} | |
649 | nodes = list(node for node in entries.iterkeys()) |
|
649 | nodes = list(node for node in entries.iterkeys()) | |
650 |
|
650 | |||
651 | for node in nodes: |
|
651 | for node in nodes: | |
652 | if node in ancestors: |
|
652 | if node in ancestors: | |
653 | continue |
|
653 | continue | |
654 | ancestors.update(self.history.getancestors(filename, node, |
|
654 | ancestors.update(self.history.getancestors(filename, node, | |
655 | known=ancestors)) |
|
655 | known=ancestors)) | |
656 |
|
656 | |||
657 | # Order the nodes children-first |

657 | # Order the nodes children-first | |
658 | orderednodes = reversed(self._toposort(ancestors)) |
|
658 | orderednodes = reversed(self._toposort(ancestors)) | |
659 |
|
659 | |||
660 | # Write to the pack |
|
660 | # Write to the pack | |
661 | dontprocess = set() |
|
661 | dontprocess = set() | |
662 | for node in orderednodes: |
|
662 | for node in orderednodes: | |
663 | p1, p2, linknode, copyfrom = ancestors[node] |
|
663 | p1, p2, linknode, copyfrom = ancestors[node] | |
664 |
|
664 | |||
665 | # If the node is marked dontprocess, but it's also in the |
|
665 | # If the node is marked dontprocess, but it's also in the | |
666 | # explicit entries set, that means the node exists both in this |
|
666 | # explicit entries set, that means the node exists both in this | |
667 | # file and in another file that was copied to this file. |
|
667 | # file and in another file that was copied to this file. | |
668 | # Usually this happens if the file was copied to another file, |
|
668 | # Usually this happens if the file was copied to another file, | |
669 | # then the copy was deleted, then reintroduced without copy |
|
669 | # then the copy was deleted, then reintroduced without copy | |
670 | # metadata. The original add and the new add have the same hash |
|
670 | # metadata. The original add and the new add have the same hash | |
671 | # since the content is identical and the parents are null. |
|
671 | # since the content is identical and the parents are null. | |
672 | if node in dontprocess and node not in entries: |
|
672 | if node in dontprocess and node not in entries: | |
673 | # If copyfrom == filename, it means the copy history |
|
673 | # If copyfrom == filename, it means the copy history | |
674 | # went to some other file, then came back to this one, so we |

674 | # went to some other file, then came back to this one, so we | |
675 | # should continue processing it. |
|
675 | # should continue processing it. | |
676 | if p1 != nullid and copyfrom != filename: |
|
676 | if p1 != nullid and copyfrom != filename: | |
677 | dontprocess.add(p1) |
|
677 | dontprocess.add(p1) | |
678 | if p2 != nullid: |
|
678 | if p2 != nullid: | |
679 | dontprocess.add(p2) |
|
679 | dontprocess.add(p2) | |
680 | continue |
|
680 | continue | |
681 |
|
681 | |||
682 | if copyfrom: |
|
682 | if copyfrom: | |
683 | dontprocess.add(p1) |
|
683 | dontprocess.add(p1) | |
684 |
|
684 | |||
685 | target.add(filename, node, p1, p2, linknode, copyfrom) |
|
685 | target.add(filename, node, p1, p2, linknode, copyfrom) | |
686 |
|
686 | |||
687 | if node in entries: |
|
687 | if node in entries: | |
688 | entries[node].historyrepacked = True |
|
688 | entries[node].historyrepacked = True | |
689 |
|
689 | |||
690 | count += 1 |
|
690 | count += 1 | |
691 | ui.progress(_("repacking history"), count, unit=self.unit, |
|
691 | ui.progress(_("repacking history"), count, unit=self.unit, | |
692 | total=len(byfile)) |
|
692 | total=len(byfile)) | |
693 |
|
693 | |||
694 | ui.progress(_("repacking history"), None) |
|
694 | ui.progress(_("repacking history"), None) | |
695 | target.close(ledger=ledger) |
|
695 | target.close(ledger=ledger) | |
696 |
|
696 | |||
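
The copy handling above is subtle. A simplified sketch of how dontprocess
propagates (invented nodes; the explicit-entries and copyfrom == filename
escape hatches are ignored here):

    nullid = b'\0' * 20
    ancestors = {
        b'n3': (b'n2', nullid, b'l3', None),
        b'n2': (b'n1', nullid, b'l2', 'old/name.py'),  # copied into this file
        b'n1': (nullid, nullid, b'l1', None),
    }
    dontprocess, written = set(), []
    for node in (b'n3', b'n2', b'n1'):                 # children first
        p1, p2, linknode, copyfrom = ancestors[node]
        if node in dontprocess:
            if p1 != nullid:
                dontprocess.add(p1)
            continue
        if copyfrom:
            dontprocess.add(p1)            # p1 belongs to the source file
        written.append(node)
    assert written == [b'n3', b'n2']       # b'n1' is skipped
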
697 | def _toposort(self, ancestors): |
|
697 | def _toposort(self, ancestors): | |
698 | def parentfunc(node): |
|
698 | def parentfunc(node): | |
699 | p1, p2, linknode, copyfrom = ancestors[node] |
|
699 | p1, p2, linknode, copyfrom = ancestors[node] | |
700 | parents = [] |
|
700 | parents = [] | |
701 | if p1 != nullid: |
|
701 | if p1 != nullid: | |
702 | parents.append(p1) |
|
702 | parents.append(p1) | |
703 | if p2 != nullid: |
|
703 | if p2 != nullid: | |
704 | parents.append(p2) |
|
704 | parents.append(p2) | |
705 | return parents |
|
705 | return parents | |
706 |
|
706 | |||
707 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) |
|
707 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | |
708 | return sortednodes |
|
708 | return sortednodes | |
709 |
|
709 | |||
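
_toposort leans on shallowutil.sortnodes, which yields nodes parents-first;
repackhistory then reverses the result to get children first. A rough
recursive stand-in (the real implementation is iterative), assuming acyclic
history:

    def sortnodes(nodes, parentfunc):
        """Emit each node only after all of its parents."""
        result, seen = [], set()
        def visit(node):
            if node in seen:
                return
            seen.add(node)
            for p in parentfunc(node):
                visit(p)
            result.append(node)
        for node in sorted(nodes):
            visit(node)
        return result
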
710 | class repackledger(object): |
|
710 | class repackledger(object): | |
711 | """Storage for all the bookkeeping that happens during a repack. It contains |
|
711 | """Storage for all the bookkeeping that happens during a repack. It contains | |
712 | the list of revisions being repacked, what happened to each revision, and |
|
712 | the list of revisions being repacked, what happened to each revision, and | |
713 | which source store contained which revision originally (for later cleanup). |
|
713 | which source store contained which revision originally (for later cleanup). | |
714 | """ |
|
714 | """ | |
715 | def __init__(self): |
|
715 | def __init__(self): | |
716 | self.entries = {} |
|
716 | self.entries = {} | |
717 | self.sources = {} |
|
717 | self.sources = {} | |
718 | self.created = set() |
|
718 | self.created = set() | |
719 |
|
719 | |||
720 | def markdataentry(self, source, filename, node): |
|
720 | def markdataentry(self, source, filename, node): | |
721 | """Mark the given filename+node revision as having a data rev in the |
|
721 | """Mark the given filename+node revision as having a data rev in the | |
722 | given source. |
|
722 | given source. | |
723 | """ |
|
723 | """ | |
724 | entry = self._getorcreateentry(filename, node) |
|
724 | entry = self._getorcreateentry(filename, node) | |
725 | entry.datasource = True |
|
725 | entry.datasource = True | |
726 | entries = self.sources.get(source) |
|
726 | entries = self.sources.get(source) | |
727 | if not entries: |
|
727 | if not entries: | |
728 | entries = set() |
|
728 | entries = set() | |
729 | self.sources[source] = entries |
|
729 | self.sources[source] = entries | |
730 | entries.add(entry) |
|
730 | entries.add(entry) | |
731 |
|
731 | |||
732 | def markhistoryentry(self, source, filename, node): |
|
732 | def markhistoryentry(self, source, filename, node): | |
733 | """Mark the given filename+node revision as having a history rev in the |
|
733 | """Mark the given filename+node revision as having a history rev in the | |
734 | given source. |
|
734 | given source. | |
735 | """ |
|
735 | """ | |
736 | entry = self._getorcreateentry(filename, node) |
|
736 | entry = self._getorcreateentry(filename, node) | |
737 | entry.historysource = True |
|
737 | entry.historysource = True | |
738 | entries = self.sources.get(source) |
|
738 | entries = self.sources.get(source) | |
739 | if not entries: |
|
739 | if not entries: | |
740 | entries = set() |
|
740 | entries = set() | |
741 | self.sources[source] = entries |
|
741 | self.sources[source] = entries | |
742 | entries.add(entry) |
|
742 | entries.add(entry) | |
743 |
|
743 | |||
744 | def _getorcreateentry(self, filename, node): |
|
744 | def _getorcreateentry(self, filename, node): | |
745 | key = (filename, node) |
|
745 | key = (filename, node) | |
746 | value = self.entries.get(key) |
|
746 | value = self.entries.get(key) | |
747 | if not value: |
|
747 | if not value: | |
748 | value = repackentry(filename, node) |
|
748 | value = repackentry(filename, node) | |
749 | self.entries[key] = value |
|
749 | self.entries[key] = value | |
750 |
|
750 | |||
751 | return value |
|
751 | return value | |
752 |
|
752 | |||
753 | def addcreated(self, value): |
|
753 | def addcreated(self, value): | |
754 | self.created.add(value) |
|
754 | self.created.add(value) | |
755 |
|
755 | |||
756 | class repackentry(object): |
|
756 | class repackentry(object): | |
757 | """Simple class representing a single revision entry in the repackledger. |
|
757 | """Simple class representing a single revision entry in the repackledger. | |
758 | """ |
|
758 | """ | |
759 | __slots__ = ['filename', 'node', 'datasource', 'historysource', |
|
759 | __slots__ = ['filename', 'node', 'datasource', 'historysource', | |
760 | 'datarepacked', 'historyrepacked', 'gced'] |
|
760 | 'datarepacked', 'historyrepacked', 'gced'] | |
761 | def __init__(self, filename, node): |
|
761 | def __init__(self, filename, node): | |
762 | self.filename = filename |
|
762 | self.filename = filename | |
763 | self.node = node |
|
763 | self.node = node | |
764 | # If the revision has a data entry in the source |
|
764 | # If the revision has a data entry in the source | |
765 | self.datasource = False |
|
765 | self.datasource = False | |
766 | # If the revision has a history entry in the source |
|
766 | # If the revision has a history entry in the source | |
767 | self.historysource = False |
|
767 | self.historysource = False | |
768 | # If the revision's data entry was repacked into the repack target |
|
768 | # If the revision's data entry was repacked into the repack target | |
769 | self.datarepacked = False |
|
769 | self.datarepacked = False | |
770 | # If the revision's history entry was repacked into the repack target |
|
770 | # If the revision's history entry was repacked into the repack target | |
771 | self.historyrepacked = False |
|
771 | self.historyrepacked = False | |
772 | # If garbage collected |
|
772 | # If garbage collected | |
773 | self.gced = False |
|
773 | self.gced = False | |
774 |
|
774 | |||
775 | def repacklockvfs(repo): |
|
775 | def repacklockvfs(repo): | |
776 | if util.safehasattr(repo, 'name'): |
|
776 | if util.safehasattr(repo, 'name'): | |
777 | # Lock in the shared cache so repacks across multiple copies of the same |
|
777 | # Lock in the shared cache so repacks across multiple copies of the same | |
778 | # repo are coordinated. |
|
778 | # repo are coordinated. | |
779 | sharedcachepath = shallowutil.getcachepackpath( |
|
779 | sharedcachepath = shallowutil.getcachepackpath( | |
780 | repo, |
|
780 | repo, | |
781 | constants.FILEPACK_CATEGORY) |
|
781 | constants.FILEPACK_CATEGORY) | |
782 | return vfs.vfs(sharedcachepath) |
|
782 | return vfs.vfs(sharedcachepath) | |
783 | else: |
|
783 | else: | |
784 | return repo.svfs |
|
784 | return repo.svfs |
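
Putting repackledger and repackentry together, a repack run drives the
ledger roughly like this (the store object and node value are invented for
illustration):

    ledger = repackledger()
    fakestore = object()          # stands in for a real data/history store
    node = b'\x11' * 20
    ledger.markdataentry(fakestore, 'dir/file.py', node)
    ledger.markhistoryentry(fakestore, 'dir/file.py', node)
    entry = ledger.entries[('dir/file.py', node)]
    assert entry.datasource and entry.historysource
    assert entry in ledger.sources[fakestore]
    # repackdata()/repackhistory() later flip datarepacked/historyrepacked
    # as each revision is written, and cleanup consults ledger.sources to
    # decide which source files are safe to delete.
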
@@ -1,308 +1,307 b'' | |||||
1 | # shallowrepo.py - shallow repository that uses remote filelogs |
|
1 | # shallowrepo.py - shallow repository that uses remote filelogs | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import os |
|
9 | import os | |
10 |
|
10 | |||
11 | from mercurial.i18n import _ |
|
11 | from mercurial.i18n import _ | |
12 | from mercurial.node import hex, nullid, nullrev |
|
12 | from mercurial.node import hex, nullid, nullrev | |
13 | from mercurial import ( |
|
13 | from mercurial import ( | |
14 | encoding, |
|
14 | encoding, | |
15 | error, |
|
15 | error, | |
16 | localrepo, |
|
16 | localrepo, | |
17 | match, |
|
17 | match, | |
18 | scmutil, |
|
18 | scmutil, | |
19 | sparse, |
|
19 | sparse, | |
20 | util, |
|
20 | util, | |
21 | ) |
|
21 | ) | |
22 | from mercurial.utils import procutil |
|
22 | from mercurial.utils import procutil | |
23 | from . import ( |
|
23 | from . import ( | |
24 | connectionpool, |
|
24 | connectionpool, | |
25 | constants, |
|
25 | constants, | |
26 | contentstore, |
|
26 | contentstore, | |
27 | datapack, |
|
27 | datapack, | |
28 | extutil, |
|
|||
29 | fileserverclient, |
|
28 | fileserverclient, | |
30 | historypack, |
|
29 | historypack, | |
31 | metadatastore, |
|
30 | metadatastore, | |
32 | remotefilectx, |
|
31 | remotefilectx, | |
33 | remotefilelog, |
|
32 | remotefilelog, | |
34 | shallowutil, |
|
33 | shallowutil, | |
35 | ) |
|
34 | ) | |
36 |
|
35 | |||
37 | if util.safehasattr(util, '_hgexecutable'): |
|
36 | if util.safehasattr(util, '_hgexecutable'): | |
38 | # Before 5be286db |
|
37 | # Before 5be286db | |
39 | _hgexecutable = util.hgexecutable |
|
38 | _hgexecutable = util.hgexecutable | |
40 | else: |
|
39 | else: | |
41 | from mercurial.utils import procutil |
|
40 | from mercurial.utils import procutil | |
42 | _hgexecutable = procutil.hgexecutable |
|
41 | _hgexecutable = procutil.hgexecutable | |
43 |
|
42 | |||
44 | requirement = "remotefilelog" |
|
43 | requirement = "remotefilelog" | |
45 | _prefetching = _('prefetching') |
|
44 | _prefetching = _('prefetching') | |
46 |
|
45 | |||
47 | # These make*stores functions are global so that other extensions can replace |
|
46 | # These make*stores functions are global so that other extensions can replace | |
48 | # them. |
|
47 | # them. | |
49 | def makelocalstores(repo): |
|
48 | def makelocalstores(repo): | |
50 | """In-repo stores, like .hg/store/data; can not be discarded.""" |
|
49 | """In-repo stores, like .hg/store/data; can not be discarded.""" | |
51 | localpath = os.path.join(repo.svfs.vfs.base, 'data') |
|
50 | localpath = os.path.join(repo.svfs.vfs.base, 'data') | |
52 | if not os.path.exists(localpath): |
|
51 | if not os.path.exists(localpath): | |
53 | os.makedirs(localpath) |
|
52 | os.makedirs(localpath) | |
54 |
|
53 | |||
55 | # Instantiate local data stores |
|
54 | # Instantiate local data stores | |
56 | localcontent = contentstore.remotefilelogcontentstore( |
|
55 | localcontent = contentstore.remotefilelogcontentstore( | |
57 | repo, localpath, repo.name, shared=False) |
|
56 | repo, localpath, repo.name, shared=False) | |
58 | localmetadata = metadatastore.remotefilelogmetadatastore( |
|
57 | localmetadata = metadatastore.remotefilelogmetadatastore( | |
59 | repo, localpath, repo.name, shared=False) |
|
58 | repo, localpath, repo.name, shared=False) | |
60 | return localcontent, localmetadata |
|
59 | return localcontent, localmetadata | |
61 |
|
60 | |||
62 | def makecachestores(repo): |
|
61 | def makecachestores(repo): | |
63 | """Typically machine-wide, cache of remote data; can be discarded.""" |
|
62 | """Typically machine-wide, cache of remote data; can be discarded.""" | |
64 | # Instantiate shared cache stores |
|
63 | # Instantiate shared cache stores | |
65 | cachepath = shallowutil.getcachepath(repo.ui) |
|
64 | cachepath = shallowutil.getcachepath(repo.ui) | |
66 | cachecontent = contentstore.remotefilelogcontentstore( |
|
65 | cachecontent = contentstore.remotefilelogcontentstore( | |
67 | repo, cachepath, repo.name, shared=True) |
|
66 | repo, cachepath, repo.name, shared=True) | |
68 | cachemetadata = metadatastore.remotefilelogmetadatastore( |
|
67 | cachemetadata = metadatastore.remotefilelogmetadatastore( | |
69 | repo, cachepath, repo.name, shared=True) |
|
68 | repo, cachepath, repo.name, shared=True) | |
70 |
|
69 | |||
71 | repo.sharedstore = cachecontent |
|
70 | repo.sharedstore = cachecontent | |
72 | repo.shareddatastores.append(cachecontent) |
|
71 | repo.shareddatastores.append(cachecontent) | |
73 | repo.sharedhistorystores.append(cachemetadata) |
|
72 | repo.sharedhistorystores.append(cachemetadata) | |
74 |
|
73 | |||
75 | return cachecontent, cachemetadata |
|
74 | return cachecontent, cachemetadata | |
76 |
|
75 | |||
77 | def makeremotestores(repo, cachecontent, cachemetadata): |
|
76 | def makeremotestores(repo, cachecontent, cachemetadata): | |
78 | """These stores fetch data from a remote server.""" |
|
77 | """These stores fetch data from a remote server.""" | |
79 | # Instantiate remote stores |
|
78 | # Instantiate remote stores | |
80 | repo.fileservice = fileserverclient.fileserverclient(repo) |
|
79 | repo.fileservice = fileserverclient.fileserverclient(repo) | |
81 | remotecontent = contentstore.remotecontentstore( |
|
80 | remotecontent = contentstore.remotecontentstore( | |
82 | repo.ui, repo.fileservice, cachecontent) |
|
81 | repo.ui, repo.fileservice, cachecontent) | |
83 | remotemetadata = metadatastore.remotemetadatastore( |
|
82 | remotemetadata = metadatastore.remotemetadatastore( | |
84 | repo.ui, repo.fileservice, cachemetadata) |
|
83 | repo.ui, repo.fileservice, cachemetadata) | |
85 | return remotecontent, remotemetadata |
|
84 | return remotecontent, remotemetadata | |
86 |
|
85 | |||
87 | def makepackstores(repo): |
|
86 | def makepackstores(repo): | |
88 | """Packs are more efficient (to read from) cache stores.""" |
|
87 | """Packs are more efficient (to read from) cache stores.""" | |
89 | # Instantiate pack stores |
|
88 | # Instantiate pack stores | |
90 | packpath = shallowutil.getcachepackpath(repo, |
|
89 | packpath = shallowutil.getcachepackpath(repo, | |
91 | constants.FILEPACK_CATEGORY) |
|
90 | constants.FILEPACK_CATEGORY) | |
92 | packcontentstore = datapack.datapackstore(repo.ui, packpath) |
|
91 | packcontentstore = datapack.datapackstore(repo.ui, packpath) | |
93 | packmetadatastore = historypack.historypackstore(repo.ui, packpath) |
|
92 | packmetadatastore = historypack.historypackstore(repo.ui, packpath) | |
94 |
|
93 | |||
95 | repo.shareddatastores.append(packcontentstore) |
|
94 | repo.shareddatastores.append(packcontentstore) | |
96 | repo.sharedhistorystores.append(packmetadatastore) |
|
95 | repo.sharedhistorystores.append(packmetadatastore) | |
97 | shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore, |
|
96 | shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore, | |
98 | packmetadatastore) |
|
97 | packmetadatastore) | |
99 | return packcontentstore, packmetadatastore |
|
98 | return packcontentstore, packmetadatastore | |
100 |
|
99 | |||
101 | def makeunionstores(repo): |
|
100 | def makeunionstores(repo): | |
102 | """Union stores iterate the other stores and return the first result.""" |
|
101 | """Union stores iterate the other stores and return the first result.""" | |
103 | repo.shareddatastores = [] |
|
102 | repo.shareddatastores = [] | |
104 | repo.sharedhistorystores = [] |
|
103 | repo.sharedhistorystores = [] | |
105 |
|
104 | |||
106 | packcontentstore, packmetadatastore = makepackstores(repo) |
|
105 | packcontentstore, packmetadatastore = makepackstores(repo) | |
107 | cachecontent, cachemetadata = makecachestores(repo) |
|
106 | cachecontent, cachemetadata = makecachestores(repo) | |
108 | localcontent, localmetadata = makelocalstores(repo) |
|
107 | localcontent, localmetadata = makelocalstores(repo) | |
109 | remotecontent, remotemetadata = makeremotestores(repo, cachecontent, |
|
108 | remotecontent, remotemetadata = makeremotestores(repo, cachecontent, | |
110 | cachemetadata) |
|
109 | cachemetadata) | |
111 |
|
110 | |||
112 | # Instantiate union stores |
|
111 | # Instantiate union stores | |
113 | repo.contentstore = contentstore.unioncontentstore( |
|
112 | repo.contentstore = contentstore.unioncontentstore( | |
114 | packcontentstore, cachecontent, |
|
113 | packcontentstore, cachecontent, | |
115 | localcontent, remotecontent, writestore=localcontent) |
|
114 | localcontent, remotecontent, writestore=localcontent) | |
116 | repo.metadatastore = metadatastore.unionmetadatastore( |
|
115 | repo.metadatastore = metadatastore.unionmetadatastore( | |
117 | packmetadatastore, cachemetadata, localmetadata, remotemetadata, |
|
116 | packmetadatastore, cachemetadata, localmetadata, remotemetadata, | |
118 | writestore=localmetadata) |
|
117 | writestore=localmetadata) | |
119 |
|
118 | |||
120 | fileservicedatawrite = cachecontent |
|
119 | fileservicedatawrite = cachecontent | |
121 | fileservicehistorywrite = cachemetadata |
|
120 | fileservicehistorywrite = cachemetadata | |
122 | if repo.ui.configbool('remotefilelog', 'fetchpacks'): |
|
121 | if repo.ui.configbool('remotefilelog', 'fetchpacks'): | |
123 | fileservicedatawrite = packcontentstore |
|
122 | fileservicedatawrite = packcontentstore | |
124 | fileservicehistorywrite = packmetadatastore |
|
123 | fileservicehistorywrite = packmetadatastore | |
125 | repo.fileservice.setstore(repo.contentstore, repo.metadatastore, |
|
124 | repo.fileservice.setstore(repo.contentstore, repo.metadatastore, | |
126 | fileservicedatawrite, fileservicehistorywrite) |
|
125 | fileservicedatawrite, fileservicehistorywrite) | |
127 | shallowutil.reportpackmetrics(repo.ui, 'filestore', |
|
126 | shallowutil.reportpackmetrics(repo.ui, 'filestore', | |
128 | packcontentstore, packmetadatastore) |
|
127 | packcontentstore, packmetadatastore) | |
129 |
|
128 | |||
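
The union stores consult their members in the order given and return the
first hit; writes go only to the designated writestore. A minimal sketch of
that lookup discipline (both dict-backed stand-ins and the KeyError
protocol are invented for illustration):

    class dictstore(object):
        def __init__(self, data):
            self._data = data
        def get(self, name, node):
            return self._data[(name, node)]

    class unionstore(object):
        def __init__(self, *stores):
            self._stores = stores
        def get(self, name, node):
            # Try each member store in order; first one that has it wins.
            for store in self._stores:
                try:
                    return store.get(name, node)
                except KeyError:
                    continue
            raise KeyError((name, node))

    fast = dictstore({('f', b'n1'): b'cached text'})
    slow = dictstore({('f', b'n1'): b'remote text', ('f', b'n2'): b'x'})
    union = unionstore(fast, slow)
    assert union.get('f', b'n1') == b'cached text'   # first store wins
    assert union.get('f', b'n2') == b'x'             # falls through to slow
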
130 | def wraprepo(repo): |
|
129 | def wraprepo(repo): | |
131 | class shallowrepository(repo.__class__): |
|
130 | class shallowrepository(repo.__class__): | |
132 | @util.propertycache |
|
131 | @util.propertycache | |
133 | def name(self): |
|
132 | def name(self): | |
134 | return self.ui.config('remotefilelog', 'reponame') |
|
133 | return self.ui.config('remotefilelog', 'reponame') | |
135 |
|
134 | |||
136 | @util.propertycache |
|
135 | @util.propertycache | |
137 | def fallbackpath(self): |
|
136 | def fallbackpath(self): | |
138 | path = repo.ui.config("remotefilelog", "fallbackpath", |
|
137 | path = repo.ui.config("remotefilelog", "fallbackpath", | |
139 | repo.ui.config('paths', 'default')) |
|
138 | repo.ui.config('paths', 'default')) | |
140 | if not path: |
|
139 | if not path: | |
141 | raise error.Abort("no remotefilelog server " |
|
140 | raise error.Abort("no remotefilelog server " | |
142 | "configured - is your .hg/hgrc trusted?") |
|
141 | "configured - is your .hg/hgrc trusted?") | |
143 |
|
142 | |||
144 | return path |
|
143 | return path | |
145 |
|
144 | |||
146 | def maybesparsematch(self, *revs, **kwargs): |
|
145 | def maybesparsematch(self, *revs, **kwargs): | |
147 | ''' |
|
146 | ''' | |
148 | A wrapper that allows the remotefilelog to invoke sparsematch() if |
|
147 | A wrapper that allows the remotefilelog to invoke sparsematch() if | |
149 | this is a sparse repository, or returns None if this is not a |
|
148 | this is a sparse repository, or returns None if this is not a | |
150 | sparse repository. |
|
149 | sparse repository. | |
151 | ''' |
|
150 | ''' | |
152 | if revs: |
|
151 | if revs: | |
153 | return sparse.matcher(repo, revs=revs) |
|
152 | return sparse.matcher(repo, revs=revs) | |
154 | return sparse.matcher(repo) |
|
153 | return sparse.matcher(repo) | |
155 |
|
154 | |||
156 | def file(self, f): |
|
155 | def file(self, f): | |
157 | if f[0] == '/': |
|
156 | if f[0] == '/': | |
158 | f = f[1:] |
|
157 | f = f[1:] | |
159 |
|
158 | |||
160 | if self.shallowmatch(f): |
|
159 | if self.shallowmatch(f): | |
161 | return remotefilelog.remotefilelog(self.svfs, f, self) |
|
160 | return remotefilelog.remotefilelog(self.svfs, f, self) | |
162 | else: |
|
161 | else: | |
163 | return super(shallowrepository, self).file(f) |
|
162 | return super(shallowrepository, self).file(f) | |
164 |
|
163 | |||
165 | def filectx(self, path, *args, **kwargs): |
|
164 | def filectx(self, path, *args, **kwargs): | |
166 | if self.shallowmatch(path): |
|
165 | if self.shallowmatch(path): | |
167 | return remotefilectx.remotefilectx(self, path, *args, **kwargs) |
|
166 | return remotefilectx.remotefilectx(self, path, *args, **kwargs) | |
168 | else: |
|
167 | else: | |
169 | return super(shallowrepository, self).filectx(path, *args, |
|
168 | return super(shallowrepository, self).filectx(path, *args, | |
170 | **kwargs) |
|
169 | **kwargs) | |
171 |
|
170 | |||
172 | @localrepo.unfilteredmethod |
|
171 | @localrepo.unfilteredmethod | |
173 | def commitctx(self, ctx, error=False): |
|
172 | def commitctx(self, ctx, error=False): | |
174 | """Add a new revision to current repository. |
|
173 | """Add a new revision to current repository. | |
175 | Revision information is passed via the context argument. |
|
174 | Revision information is passed via the context argument. | |
176 | """ |
|
175 | """ | |
177 |
|
176 | |||
178 | # some contexts already have manifest nodes; they don't need any |

177 | # some contexts already have manifest nodes; they don't need any | |
179 | # prefetching (for example, if we're just editing a commit message |

178 | # prefetching (for example, if we're just editing a commit message | |
180 | # we can reuse the manifest) |

179 | # we can reuse the manifest) | |
181 | if not ctx.manifestnode(): |
|
180 | if not ctx.manifestnode(): | |
182 | # prefetch files that will likely be compared |
|
181 | # prefetch files that will likely be compared | |
183 | m1 = ctx.p1().manifest() |
|
182 | m1 = ctx.p1().manifest() | |
184 | files = [] |
|
183 | files = [] | |
185 | for f in ctx.modified() + ctx.added(): |
|
184 | for f in ctx.modified() + ctx.added(): | |
186 | fparent1 = m1.get(f, nullid) |
|
185 | fparent1 = m1.get(f, nullid) | |
187 | if fparent1 != nullid: |
|
186 | if fparent1 != nullid: | |
188 | files.append((f, hex(fparent1))) |
|
187 | files.append((f, hex(fparent1))) | |
189 | self.fileservice.prefetch(files) |
|
188 | self.fileservice.prefetch(files) | |
190 | return super(shallowrepository, self).commitctx(ctx, |
|
189 | return super(shallowrepository, self).commitctx(ctx, | |
191 | error=error) |
|
190 | error=error) | |
192 |
|
191 | |||
193 | def backgroundprefetch(self, revs, base=None, repack=False, pats=None, |
|
192 | def backgroundprefetch(self, revs, base=None, repack=False, pats=None, | |
194 | opts=None): |
|
193 | opts=None): | |
195 | """Runs prefetch in background with optional repack |
|
194 | """Runs prefetch in background with optional repack | |
196 | """ |
|
195 | """ | |
197 | cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch'] |
|
196 | cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch'] | |
198 | if repack: |
|
197 | if repack: | |
199 | cmd.append('--repack') |
|
198 | cmd.append('--repack') | |
200 | if revs: |
|
199 | if revs: | |
201 | cmd += ['-r', revs] |
|
200 | cmd += ['-r', revs] | |
202 |
|
|
201 | procutil.runbgcommand(cmd, encoding.environ) | |
203 |
|
202 | |||
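
For clarity, the command assembled above simply re-invokes hg against the
same repo and detaches it via procutil.runbgcommand. A stand-alone sketch of
the construction (hgexe and root are invented names):

    def buildprefetchcmd(hgexe, root, revs=None, repack=False):
        cmd = [hgexe, '-R', root, 'prefetch']
        if repack:
            cmd.append('--repack')
        if revs:
            cmd += ['-r', revs]
        return cmd

    assert buildprefetchcmd('hg', '/repo', revs='master', repack=True) == \
        ['hg', '-R', '/repo', 'prefetch', '--repack', '-r', 'master']
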
204 | def prefetch(self, revs, base=None, pats=None, opts=None): |
|
203 | def prefetch(self, revs, base=None, pats=None, opts=None): | |
205 | """Prefetches all the necessary file revisions for the given revs |
|
204 | """Prefetches all the necessary file revisions for the given revs | |
206 | Optionally runs repack in background |
|
205 | Optionally runs repack in background | |
207 | """ |
|
206 | """ | |
208 | with repo._lock(repo.svfs, 'prefetchlock', True, None, None, |
|
207 | with repo._lock(repo.svfs, 'prefetchlock', True, None, None, | |
209 | _('prefetching in %s') % repo.origroot): |
|
208 | _('prefetching in %s') % repo.origroot): | |
210 | self._prefetch(revs, base, pats, opts) |
|
209 | self._prefetch(revs, base, pats, opts) | |
211 |
|
210 | |||
212 | def _prefetch(self, revs, base=None, pats=None, opts=None): |
|
211 | def _prefetch(self, revs, base=None, pats=None, opts=None): | |
213 | fallbackpath = self.fallbackpath |
|
212 | fallbackpath = self.fallbackpath | |
214 | if fallbackpath: |
|
213 | if fallbackpath: | |
215 | # If we know a rev is on the server, we should fetch the server |
|
214 | # If we know a rev is on the server, we should fetch the server | |
216 | # version of those files, since our local file versions might |
|
215 | # version of those files, since our local file versions might | |
217 | # become obsolete if the local commits are stripped. |
|
216 | # become obsolete if the local commits are stripped. | |
218 | localrevs = repo.revs('outgoing(%s)', fallbackpath) |
|
217 | localrevs = repo.revs('outgoing(%s)', fallbackpath) | |
219 | if base is not None and base != nullrev: |
|
218 | if base is not None and base != nullrev: | |
220 | serverbase = list(repo.revs('first(reverse(::%s) - %ld)', |
|
219 | serverbase = list(repo.revs('first(reverse(::%s) - %ld)', | |
221 | base, localrevs)) |
|
220 | base, localrevs)) | |
222 | if serverbase: |
|
221 | if serverbase: | |
223 | base = serverbase[0] |
|
222 | base = serverbase[0] | |
224 | else: |
|
223 | else: | |
225 | localrevs = repo |
|
224 | localrevs = repo | |
226 |
|
225 | |||
227 | mfl = repo.manifestlog |
|
226 | mfl = repo.manifestlog | |
228 | mfrevlog = mfl.getstorage('') |
|
227 | mfrevlog = mfl.getstorage('') | |
229 | if base is not None: |
|
228 | if base is not None: | |
230 | mfdict = mfl[repo[base].manifestnode()].read() |
|
229 | mfdict = mfl[repo[base].manifestnode()].read() | |
231 | skip = set(mfdict.iteritems()) |
|
230 | skip = set(mfdict.iteritems()) | |
232 | else: |
|
231 | else: | |
233 | skip = set() |
|
232 | skip = set() | |
234 |
|
233 | |||
235 | # Copy the skip set to start large and avoid constant resizing, |
|
234 | # Copy the skip set to start large and avoid constant resizing, | |
236 | # and since it's likely to be very similar to the prefetch set. |
|
235 | # and since it's likely to be very similar to the prefetch set. | |
237 | files = skip.copy() |
|
236 | files = skip.copy() | |
238 | serverfiles = skip.copy() |
|
237 | serverfiles = skip.copy() | |
239 | visited = set() |
|
238 | visited = set() | |
240 | visited.add(nullrev) |
|
239 | visited.add(nullrev) | |
241 | revnum = 0 |
|
240 | revnum = 0 | |
242 | revcount = len(revs) |
|
241 | revcount = len(revs) | |
243 | self.ui.progress(_prefetching, revnum, total=revcount) |
|
242 | self.ui.progress(_prefetching, revnum, total=revcount) | |
244 | for rev in sorted(revs): |
|
243 | for rev in sorted(revs): | |
245 | ctx = repo[rev] |
|
244 | ctx = repo[rev] | |
246 | if pats: |
|
245 | if pats: | |
247 | m = scmutil.match(ctx, pats, opts) |
|
246 | m = scmutil.match(ctx, pats, opts) | |
248 | sparsematch = repo.maybesparsematch(rev) |
|
247 | sparsematch = repo.maybesparsematch(rev) | |
249 |
|
248 | |||
250 | mfnode = ctx.manifestnode() |
|
249 | mfnode = ctx.manifestnode() | |
251 | mfrev = mfrevlog.rev(mfnode) |
|
250 | mfrev = mfrevlog.rev(mfnode) | |
252 |
|
251 | |||
253 | # Decompressing manifests is expensive. |
|
252 | # Decompressing manifests is expensive. | |
254 | # When possible, only read the deltas. |
|
253 | # When possible, only read the deltas. | |
255 | p1, p2 = mfrevlog.parentrevs(mfrev) |
|
254 | p1, p2 = mfrevlog.parentrevs(mfrev) | |
256 | if p1 in visited and p2 in visited: |
|
255 | if p1 in visited and p2 in visited: | |
257 | mfdict = mfl[mfnode].readfast() |
|
256 | mfdict = mfl[mfnode].readfast() | |
258 | else: |
|
257 | else: | |
259 | mfdict = mfl[mfnode].read() |
|
258 | mfdict = mfl[mfnode].read() | |
260 |
|
259 | |||
261 | diff = mfdict.iteritems() |
|
260 | diff = mfdict.iteritems() | |
262 | if pats: |
|
261 | if pats: | |
263 | diff = (pf for pf in diff if m(pf[0])) |
|
262 | diff = (pf for pf in diff if m(pf[0])) | |
264 | if sparsematch: |
|
263 | if sparsematch: | |
265 | diff = (pf for pf in diff if sparsematch(pf[0])) |
|
264 | diff = (pf for pf in diff if sparsematch(pf[0])) | |
266 | if rev not in localrevs: |
|
265 | if rev not in localrevs: | |
267 | serverfiles.update(diff) |
|
266 | serverfiles.update(diff) | |
268 | else: |
|
267 | else: | |
269 | files.update(diff) |
|
268 | files.update(diff) | |
270 |
|
269 | |||
271 | visited.add(mfrev) |
|
270 | visited.add(mfrev) | |
272 | revnum += 1 |
|
271 | revnum += 1 | |
273 | self.ui.progress(_prefetching, revnum, total=revcount) |
|
272 | self.ui.progress(_prefetching, revnum, total=revcount) | |
274 |
|
273 | |||
275 | files.difference_update(skip) |
|
274 | files.difference_update(skip) | |
276 | serverfiles.difference_update(skip) |
|
275 | serverfiles.difference_update(skip) | |
277 | self.ui.progress(_prefetching, None) |
|
276 | self.ui.progress(_prefetching, None) | |
278 |
|
277 | |||
279 | # Fetch files known to be on the server |
|
278 | # Fetch files known to be on the server | |
280 | if serverfiles: |
|
279 | if serverfiles: | |
281 | results = [(path, hex(fnode)) for (path, fnode) in serverfiles] |
|
280 | results = [(path, hex(fnode)) for (path, fnode) in serverfiles] | |
282 | repo.fileservice.prefetch(results, force=True) |
|
281 | repo.fileservice.prefetch(results, force=True) | |
283 |
|
282 | |||
284 | # Fetch files that may or may not be on the server |
|
283 | # Fetch files that may or may not be on the server | |
285 | if files: |
|
284 | if files: | |
286 | results = [(path, hex(fnode)) for (path, fnode) in files] |
|
285 | results = [(path, hex(fnode)) for (path, fnode) in files] | |
287 | repo.fileservice.prefetch(results) |
|
286 | repo.fileservice.prefetch(results) | |
288 |
|
287 | |||
289 | def close(self): |
|
288 | def close(self): | |
290 | super(shallowrepository, self).close() |
|
289 | super(shallowrepository, self).close() | |
291 | self.connectionpool.close() |
|
290 | self.connectionpool.close() | |
292 |
|
291 | |||
293 | repo.__class__ = shallowrepository |
|
292 | repo.__class__ = shallowrepository | |
294 |
|
293 | |||
295 | repo.shallowmatch = match.always(repo.root, '') |
|
294 | repo.shallowmatch = match.always(repo.root, '') | |
296 |
|
295 | |||
297 | makeunionstores(repo) |
|
296 | makeunionstores(repo) | |
298 |
|
297 | |||
299 | repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern", |
|
298 | repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern", | |
300 | None) |
|
299 | None) | |
301 | repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern", |
|
300 | repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern", | |
302 | None) |
|
301 | None) | |
303 | if not util.safehasattr(repo, 'connectionpool'): |
|
302 | if not util.safehasattr(repo, 'connectionpool'): | |
304 | repo.connectionpool = connectionpool.connectionpool(repo) |
|
303 | repo.connectionpool = connectionpool.connectionpool(repo) | |
305 |
|
304 | |||
306 | if repo.includepattern or repo.excludepattern: |
|
305 | if repo.includepattern or repo.excludepattern: | |
307 | repo.shallowmatch = match.match(repo.root, '', None, |
|
306 | repo.shallowmatch = match.match(repo.root, '', None, | |
308 | repo.includepattern, repo.excludepattern) |
|
307 | repo.includepattern, repo.excludepattern) |
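
The bookkeeping in _prefetch above is plain set algebra: files and
serverfiles start as copies of the base manifest's entries (skip), each
walked manifest is unioned in, and skip is subtracted at the end so only new
(path, filenode) pairs get fetched. In miniature, with invented pairs:

    skip = {('a.txt', b'\x01' * 20)}         # contents of the base manifest
    files = skip.copy()                      # pre-sized copy, per the comment
    files.update({('a.txt', b'\x01' * 20),   # unchanged, already in skip
                  ('b.txt', b'\x02' * 20)})  # new in a walked manifest
    files.difference_update(skip)
    assert files == {('b.txt', b'\x02' * 20)}  # only the new revision is fetched
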
@@ -1,469 +1,541 b'' | |||||
1 | # procutil.py - utility for managing processes and executable environment |
|
1 | # procutil.py - utility for managing processes and executable environment | |
2 | # |
|
2 | # | |
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> | |
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
6 | # |
|
6 | # | |
7 | # This software may be used and distributed according to the terms of the |
|
7 | # This software may be used and distributed according to the terms of the | |
8 | # GNU General Public License version 2 or any later version. |
|
8 | # GNU General Public License version 2 or any later version. | |
9 |
|
9 | |||
10 | from __future__ import absolute_import |
|
10 | from __future__ import absolute_import | |
11 |
|
11 | |||
12 | import contextlib |
|
12 | import contextlib | |
|
13 | import errno | |||
13 | import imp |
|
14 | import imp | |
14 | import io |
|
15 | import io | |
15 | import os |
|
16 | import os | |
16 | import signal |
|
17 | import signal | |
17 | import subprocess |
|
18 | import subprocess | |
18 | import sys |
|
19 | import sys | |
19 | import time |
|
20 | import time | |
20 |
|
21 | |||
21 | from ..i18n import _ |
|
22 | from ..i18n import _ | |
22 |
|
23 | |||
23 | from .. import ( |
|
24 | from .. import ( | |
24 | encoding, |
|
25 | encoding, | |
25 | error, |
|
26 | error, | |
26 | policy, |
|
27 | policy, | |
27 | pycompat, |
|
28 | pycompat, | |
28 | ) |
|
29 | ) | |
29 |
|
30 | |||
30 | osutil = policy.importmod(r'osutil') |
|
31 | osutil = policy.importmod(r'osutil') | |
31 |
|
32 | |||
32 | stderr = pycompat.stderr |
|
33 | stderr = pycompat.stderr | |
33 | stdin = pycompat.stdin |
|
34 | stdin = pycompat.stdin | |
34 | stdout = pycompat.stdout |
|
35 | stdout = pycompat.stdout | |
35 |
|
36 | |||
36 | def isatty(fp): |
|
37 | def isatty(fp): | |
37 | try: |
|
38 | try: | |
38 | return fp.isatty() |
|
39 | return fp.isatty() | |
39 | except AttributeError: |
|
40 | except AttributeError: | |
40 | return False |
|
41 | return False | |
41 |
|
42 | |||
42 | # glibc determines buffering on first write to stdout - if we replace a |

43 | # glibc determines buffering on first write to stdout - if we replace a | |
43 | # TTY-destined stdout with a pipe-destined stdout (e.g. a pager), we want |

44 | # TTY-destined stdout with a pipe-destined stdout (e.g. a pager), we want | |
44 | # line buffering (or unbuffered, on Windows) |

45 | # line buffering (or unbuffered, on Windows) | |
45 | if isatty(stdout): |
|
46 | if isatty(stdout): | |
46 | if pycompat.iswindows: |
|
47 | if pycompat.iswindows: | |
47 | # Windows doesn't support line buffering |
|
48 | # Windows doesn't support line buffering | |
48 | stdout = os.fdopen(stdout.fileno(), r'wb', 0) |
|
49 | stdout = os.fdopen(stdout.fileno(), r'wb', 0) | |
49 | else: |
|
50 | else: | |
50 | stdout = os.fdopen(stdout.fileno(), r'wb', 1) |
|
51 | stdout = os.fdopen(stdout.fileno(), r'wb', 1) | |
51 |
|
52 | |||
52 | if pycompat.iswindows: |
|
53 | if pycompat.iswindows: | |
53 | from .. import windows as platform |
|
54 | from .. import windows as platform | |
54 | stdout = platform.winstdout(stdout) |
|
55 | stdout = platform.winstdout(stdout) | |
55 | else: |
|
56 | else: | |
56 | from .. import posix as platform |
|
57 | from .. import posix as platform | |
57 |
|
58 | |||
58 | findexe = platform.findexe |
|
59 | findexe = platform.findexe | |
59 | _gethgcmd = platform.gethgcmd |
|
60 | _gethgcmd = platform.gethgcmd | |
60 | getuser = platform.getuser |
|
61 | getuser = platform.getuser | |
61 | getpid = os.getpid |
|
62 | getpid = os.getpid | |
62 | hidewindow = platform.hidewindow |
|
63 | hidewindow = platform.hidewindow | |
63 | quotecommand = platform.quotecommand |
|
64 | quotecommand = platform.quotecommand | |
64 | readpipe = platform.readpipe |
|
65 | readpipe = platform.readpipe | |
65 | setbinary = platform.setbinary |
|
66 | setbinary = platform.setbinary | |
66 | setsignalhandler = platform.setsignalhandler |
|
67 | setsignalhandler = platform.setsignalhandler | |
67 | shellquote = platform.shellquote |
|
68 | shellquote = platform.shellquote | |
68 | shellsplit = platform.shellsplit |
|
69 | shellsplit = platform.shellsplit | |
69 | spawndetached = platform.spawndetached |
|
70 | spawndetached = platform.spawndetached | |
70 | sshargs = platform.sshargs |
|
71 | sshargs = platform.sshargs | |
71 | testpid = platform.testpid |
|
72 | testpid = platform.testpid | |
72 |
|
73 | |||
73 | try: |
|
74 | try: | |
74 | setprocname = osutil.setprocname |
|
75 | setprocname = osutil.setprocname | |
75 | except AttributeError: |
|
76 | except AttributeError: | |
76 | pass |
|
77 | pass | |
77 | try: |
|
78 | try: | |
78 | unblocksignal = osutil.unblocksignal |
|
79 | unblocksignal = osutil.unblocksignal | |
79 | except AttributeError: |
|
80 | except AttributeError: | |
80 | pass |
|
81 | pass | |
81 |
|
82 | |||
82 | closefds = pycompat.isposix |
|
83 | closefds = pycompat.isposix | |
83 |
|
84 | |||
84 | def explainexit(code): |
|
85 | def explainexit(code): | |
85 | """return a message describing a subprocess status |
|
86 | """return a message describing a subprocess status | |
86 | (codes from kill are negative - not os.system/wait encoding)""" |
|
87 | (codes from kill are negative - not os.system/wait encoding)""" | |
87 | if code >= 0: |
|
88 | if code >= 0: | |
88 | return _("exited with status %d") % code |
|
89 | return _("exited with status %d") % code | |
89 | return _("killed by signal %d") % -code |
|
90 | return _("killed by signal %d") % -code | |
90 |
|
91 | |||
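
For reference, explainexit() expects subprocess-style coding:

    explainexit(0)     # -> "exited with status 0"
    explainexit(-15)   # -> "killed by signal 15" (i.e. SIGTERM)
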
91 | class _pfile(object): |
|
92 | class _pfile(object): | |
92 | """File-like wrapper for a stream opened by subprocess.Popen()""" |
|
93 | """File-like wrapper for a stream opened by subprocess.Popen()""" | |
93 |
|
94 | |||
94 | def __init__(self, proc, fp): |
|
95 | def __init__(self, proc, fp): | |
95 | self._proc = proc |
|
96 | self._proc = proc | |
96 | self._fp = fp |
|
97 | self._fp = fp | |
97 |
|
98 | |||
98 | def close(self): |
|
99 | def close(self): | |
99 | # unlike os.popen(), this returns an integer in subprocess coding |
|
100 | # unlike os.popen(), this returns an integer in subprocess coding | |
100 | self._fp.close() |
|
101 | self._fp.close() | |
101 | return self._proc.wait() |
|
102 | return self._proc.wait() | |
102 |
|
103 | |||
103 | def __iter__(self): |
|
104 | def __iter__(self): | |
104 | return iter(self._fp) |
|
105 | return iter(self._fp) | |
105 |
|
106 | |||
106 | def __getattr__(self, attr): |
|
107 | def __getattr__(self, attr): | |
107 | return getattr(self._fp, attr) |
|
108 | return getattr(self._fp, attr) | |
108 |
|
109 | |||
109 | def __enter__(self): |
|
110 | def __enter__(self): | |
110 | return self |
|
111 | return self | |
111 |
|
112 | |||
112 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
113 | def __exit__(self, exc_type, exc_value, exc_tb): | |
113 | self.close() |
|
114 | self.close() | |
114 |
|
115 | |||
115 | def popen(cmd, mode='rb', bufsize=-1): |
|
116 | def popen(cmd, mode='rb', bufsize=-1): | |
116 | if mode == 'rb': |
|
117 | if mode == 'rb': | |
117 | return _popenreader(cmd, bufsize) |
|
118 | return _popenreader(cmd, bufsize) | |
118 | elif mode == 'wb': |
|
119 | elif mode == 'wb': | |
119 | return _popenwriter(cmd, bufsize) |
|
120 | return _popenwriter(cmd, bufsize) | |
120 | raise error.ProgrammingError('unsupported mode: %r' % mode) |
|
121 | raise error.ProgrammingError('unsupported mode: %r' % mode) | |
121 |
|
122 | |||
122 | def _popenreader(cmd, bufsize): |
|
123 | def _popenreader(cmd, bufsize): | |
123 | p = subprocess.Popen(tonativestr(quotecommand(cmd)), |
|
124 | p = subprocess.Popen(tonativestr(quotecommand(cmd)), | |
124 | shell=True, bufsize=bufsize, |
|
125 | shell=True, bufsize=bufsize, | |
125 | close_fds=closefds, |
|
126 | close_fds=closefds, | |
126 | stdout=subprocess.PIPE) |
|
127 | stdout=subprocess.PIPE) | |
127 | return _pfile(p, p.stdout) |
|
128 | return _pfile(p, p.stdout) | |
128 |
|
129 | |||
129 | def _popenwriter(cmd, bufsize): |
|
130 | def _popenwriter(cmd, bufsize): | |
130 | p = subprocess.Popen(tonativestr(quotecommand(cmd)), |
|
131 | p = subprocess.Popen(tonativestr(quotecommand(cmd)), | |
131 | shell=True, bufsize=bufsize, |
|
132 | shell=True, bufsize=bufsize, | |
132 | close_fds=closefds, |
|
133 | close_fds=closefds, | |
133 | stdin=subprocess.PIPE) |
|
134 | stdin=subprocess.PIPE) | |
134 | return _pfile(p, p.stdin) |
|
135 | return _pfile(p, p.stdin) | |
135 |
|
136 | |||
136 | def popen2(cmd, env=None): |
|
137 | def popen2(cmd, env=None): | |
137 | # Setting bufsize to -1 lets the system decide the buffer size. |
|
138 | # Setting bufsize to -1 lets the system decide the buffer size. | |
138 | # The default for bufsize is 0, meaning unbuffered. This leads to |
|
139 | # The default for bufsize is 0, meaning unbuffered. This leads to | |
139 | # poor performance on Mac OS X: http://bugs.python.org/issue4194 |
|
140 | # poor performance on Mac OS X: http://bugs.python.org/issue4194 | |
140 | p = subprocess.Popen(tonativestr(cmd), |
|
141 | p = subprocess.Popen(tonativestr(cmd), | |
141 | shell=True, bufsize=-1, |
|
142 | shell=True, bufsize=-1, | |
142 | close_fds=closefds, |
|
143 | close_fds=closefds, | |
143 | stdin=subprocess.PIPE, stdout=subprocess.PIPE, |
|
144 | stdin=subprocess.PIPE, stdout=subprocess.PIPE, | |
144 | env=tonativeenv(env)) |
|
145 | env=tonativeenv(env)) | |
145 | return p.stdin, p.stdout |
|
146 | return p.stdin, p.stdout | |
146 |
|
147 | |||
147 | def popen3(cmd, env=None): |
|
148 | def popen3(cmd, env=None): | |
148 | stdin, stdout, stderr, p = popen4(cmd, env) |
|
149 | stdin, stdout, stderr, p = popen4(cmd, env) | |
149 | return stdin, stdout, stderr |
|
150 | return stdin, stdout, stderr | |
150 |
|
151 | |||
151 | def popen4(cmd, env=None, bufsize=-1): |
|
152 | def popen4(cmd, env=None, bufsize=-1): | |
152 | p = subprocess.Popen(tonativestr(cmd), |
|
153 | p = subprocess.Popen(tonativestr(cmd), | |
153 | shell=True, bufsize=bufsize, |
|
154 | shell=True, bufsize=bufsize, | |
154 | close_fds=closefds, |
|
155 | close_fds=closefds, | |
155 | stdin=subprocess.PIPE, stdout=subprocess.PIPE, |
|
156 | stdin=subprocess.PIPE, stdout=subprocess.PIPE, | |
156 | stderr=subprocess.PIPE, |
|
157 | stderr=subprocess.PIPE, | |
157 | env=tonativeenv(env)) |
|
158 | env=tonativeenv(env)) | |
158 | return p.stdin, p.stdout, p.stderr, p |
|
159 | return p.stdin, p.stdout, p.stderr, p | |
159 |
|
160 | |||
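
popen2()/popen4() hand back the raw pipe ends. A round trip through
popen2(), assuming a POSIX 'cat' is available on PATH:

    pin, pout = popen2(b'cat')
    pin.write(b'hello\n')
    pin.close()                     # signal EOF so 'cat' terminates
    assert pout.read() == b'hello\n'
    pout.close()
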
160 | def pipefilter(s, cmd): |
|
161 | def pipefilter(s, cmd): | |
161 | '''filter string S through command CMD, returning its output''' |
|
162 | '''filter string S through command CMD, returning its output''' | |
162 | p = subprocess.Popen(tonativestr(cmd), |
|
163 | p = subprocess.Popen(tonativestr(cmd), | |
163 | shell=True, close_fds=closefds, |
|
164 | shell=True, close_fds=closefds, | |
164 | stdin=subprocess.PIPE, stdout=subprocess.PIPE) |
|
165 | stdin=subprocess.PIPE, stdout=subprocess.PIPE) | |
165 | pout, perr = p.communicate(s) |
|
166 | pout, perr = p.communicate(s) | |
166 | return pout |
|
167 | return pout | |
167 |
|
168 | |||
168 | def tempfilter(s, cmd): |
|
169 | def tempfilter(s, cmd): | |
169 | '''filter string S through a pair of temporary files with CMD. |
|
170 | '''filter string S through a pair of temporary files with CMD. | |
170 | CMD is used as a template to create the real command to be run, |
|
171 | CMD is used as a template to create the real command to be run, | |
171 | with the strings INFILE and OUTFILE replaced by the real names of |
|
172 | with the strings INFILE and OUTFILE replaced by the real names of | |
172 | the temporary files generated.''' |
|
173 | the temporary files generated.''' | |
173 | inname, outname = None, None |
|
174 | inname, outname = None, None | |
174 | try: |
|
175 | try: | |
175 | infd, inname = pycompat.mkstemp(prefix='hg-filter-in-') |
|
176 | infd, inname = pycompat.mkstemp(prefix='hg-filter-in-') | |
176 | fp = os.fdopen(infd, r'wb') |
|
177 | fp = os.fdopen(infd, r'wb') | |
177 | fp.write(s) |
|
178 | fp.write(s) | |
178 | fp.close() |
|
179 | fp.close() | |
179 | outfd, outname = pycompat.mkstemp(prefix='hg-filter-out-') |
|
180 | outfd, outname = pycompat.mkstemp(prefix='hg-filter-out-') | |
180 | os.close(outfd) |
|
181 | os.close(outfd) | |
181 | cmd = cmd.replace('INFILE', inname) |
|
182 | cmd = cmd.replace('INFILE', inname) | |
182 | cmd = cmd.replace('OUTFILE', outname) |
|
183 | cmd = cmd.replace('OUTFILE', outname) | |
183 | code = system(cmd) |
|
184 | code = system(cmd) | |
184 | if pycompat.sysplatform == 'OpenVMS' and code & 1: |
|
185 | if pycompat.sysplatform == 'OpenVMS' and code & 1: | |
185 | code = 0 |
|
186 | code = 0 | |
186 | if code: |
|
187 | if code: | |
187 | raise error.Abort(_("command '%s' failed: %s") % |
|
188 | raise error.Abort(_("command '%s' failed: %s") % | |
188 | (cmd, explainexit(code))) |
|
189 | (cmd, explainexit(code))) | |
189 | with open(outname, 'rb') as fp: |
|
190 | with open(outname, 'rb') as fp: | |
190 | return fp.read() |
|
191 | return fp.read() | |
191 | finally: |
|
192 | finally: | |
192 | try: |
|
193 | try: | |
193 | if inname: |
|
194 | if inname: | |
194 | os.unlink(inname) |
|
195 | os.unlink(inname) | |
195 | except OSError: |
|
196 | except OSError: | |
196 | pass |
|
197 | pass | |
197 | try: |
|
198 | try: | |
198 | if outname: |
|
199 | if outname: | |
199 | os.unlink(outname) |
|
200 | os.unlink(outname) | |
200 | except OSError: |
|
201 | except OSError: | |
201 | pass |
|
202 | pass | |
202 |
|
203 | |||
203 | _filtertable = { |
|
204 | _filtertable = { | |
204 | 'tempfile:': tempfilter, |
|
205 | 'tempfile:': tempfilter, | |
205 | 'pipe:': pipefilter, |
|
206 | 'pipe:': pipefilter, | |
206 | } |
|
207 | } | |
207 |
|
208 | |||
208 | def filter(s, cmd): |
|
209 | def filter(s, cmd): | |
209 | "filter a string through a command that transforms its input to its output" |
|
210 | "filter a string through a command that transforms its input to its output" | |
210 | for name, fn in _filtertable.iteritems(): |
|
211 | for name, fn in _filtertable.iteritems(): | |
211 | if cmd.startswith(name): |
|
212 | if cmd.startswith(name): | |
212 | return fn(s, cmd[len(name):].lstrip()) |
|
213 | return fn(s, cmd[len(name):].lstrip()) | |
213 | return pipefilter(s, cmd) |
|
214 | return pipefilter(s, cmd) | |
214 |
|
215 | |||
215 | def mainfrozen(): |
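
filter() dispatches on the command prefix via _filtertable; anything without
a known prefix falls through to pipefilter. For example (a POSIX 'sort' on
PATH is assumed):

    assert filter(b'b\na\nc\n', b'pipe: sort') == b'a\nb\nc\n'
    # no prefix defaults to pipefilter as well:
    assert filter(b'b\na\nc\n', b'sort') == b'a\nb\nc\n'
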
|
216 | def mainfrozen(): | |
216 | """return True if we are a frozen executable. |
|
217 | """return True if we are a frozen executable. | |
217 |
|
218 | |||
218 | The code supports py2exe (most common, Windows only) and tools/freeze |
|
219 | The code supports py2exe (most common, Windows only) and tools/freeze | |
219 | (portable, not much used). |
|
220 | (portable, not much used). | |
220 | """ |
|
221 | """ | |
221 | return (pycompat.safehasattr(sys, "frozen") or # new py2exe |
|
222 | return (pycompat.safehasattr(sys, "frozen") or # new py2exe | |
222 | pycompat.safehasattr(sys, "importers") or # old py2exe |
|
223 | pycompat.safehasattr(sys, "importers") or # old py2exe | |
223 | imp.is_frozen(u"__main__")) # tools/freeze |
|
224 | imp.is_frozen(u"__main__")) # tools/freeze | |
224 |
|
225 | |||
225 | _hgexecutable = None |
|
226 | _hgexecutable = None | |
226 |
|
227 | |||
227 | def hgexecutable(): |
|
228 | def hgexecutable(): | |
228 | """return location of the 'hg' executable. |
|
229 | """return location of the 'hg' executable. | |
229 |
|
230 | |||
230 | Defaults to $HG or 'hg' in the search path. |
|
231 | Defaults to $HG or 'hg' in the search path. | |
231 | """ |
|
232 | """ | |
232 | if _hgexecutable is None: |
|
233 | if _hgexecutable is None: | |
233 | hg = encoding.environ.get('HG') |
|
234 | hg = encoding.environ.get('HG') | |
234 | mainmod = sys.modules[r'__main__'] |
|
235 | mainmod = sys.modules[r'__main__'] | |
235 | if hg: |
|
236 | if hg: | |
236 | _sethgexecutable(hg) |
|
237 | _sethgexecutable(hg) | |
237 | elif mainfrozen(): |
|
238 | elif mainfrozen(): | |
238 | if getattr(sys, 'frozen', None) == 'macosx_app': |
|
239 | if getattr(sys, 'frozen', None) == 'macosx_app': | |
239 | # Env variable set by py2app |
|
240 | # Env variable set by py2app | |
240 | _sethgexecutable(encoding.environ['EXECUTABLEPATH']) |
|
241 | _sethgexecutable(encoding.environ['EXECUTABLEPATH']) | |
241 | else: |
|
242 | else: | |
242 | _sethgexecutable(pycompat.sysexecutable) |
|
243 | _sethgexecutable(pycompat.sysexecutable) | |
243 | elif (os.path.basename( |
|
244 | elif (os.path.basename( | |
244 | pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'): |
|
245 | pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'): | |
245 | _sethgexecutable(pycompat.fsencode(mainmod.__file__)) |
|
246 | _sethgexecutable(pycompat.fsencode(mainmod.__file__)) | |
246 | else: |
|
247 | else: | |
247 | exe = findexe('hg') or os.path.basename(sys.argv[0]) |
|
248 | exe = findexe('hg') or os.path.basename(sys.argv[0]) | |
248 | _sethgexecutable(exe) |
|
249 | _sethgexecutable(exe) | |
249 | return _hgexecutable |
|
250 | return _hgexecutable | |
250 |
|
251 | |||
251 | def _sethgexecutable(path): |
|
252 | def _sethgexecutable(path): | |
252 | """set location of the 'hg' executable""" |
|
253 | """set location of the 'hg' executable""" | |
253 | global _hgexecutable |
|
254 | global _hgexecutable | |
254 | _hgexecutable = path |
|
255 | _hgexecutable = path | |
255 |
|
256 | |||
256 | def _testfileno(f, stdf): |
|
257 | def _testfileno(f, stdf): | |
257 | fileno = getattr(f, 'fileno', None) |
|
258 | fileno = getattr(f, 'fileno', None) | |
258 | try: |
|
259 | try: | |
259 | return fileno and fileno() == stdf.fileno() |
|
260 | return fileno and fileno() == stdf.fileno() | |
260 | except io.UnsupportedOperation: |
|
261 | except io.UnsupportedOperation: | |
261 | return False # fileno() raised UnsupportedOperation |
|
262 | return False # fileno() raised UnsupportedOperation | |
262 |
|
263 | |||
263 | def isstdin(f): |
|
264 | def isstdin(f): | |
264 | return _testfileno(f, sys.__stdin__) |
|
265 | return _testfileno(f, sys.__stdin__) | |
265 |
|
266 | |||
266 | def isstdout(f): |
|
267 | def isstdout(f): | |
267 | return _testfileno(f, sys.__stdout__) |
|
268 | return _testfileno(f, sys.__stdout__) | |
268 |
|
269 | |||
269 | def protectstdio(uin, uout): |
|
270 | def protectstdio(uin, uout): | |
270 | """Duplicate streams and redirect original if (uin, uout) are stdio |
|
271 | """Duplicate streams and redirect original if (uin, uout) are stdio | |
271 |
|
272 | |||
272 | If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's |
|
273 | If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's | |
273 | redirected to stderr so the output is still readable. |
|
274 | redirected to stderr so the output is still readable. | |
274 |
|
275 | |||
275 | Returns (fin, fout) which point to the original (uin, uout) fds, but |
|
276 | Returns (fin, fout) which point to the original (uin, uout) fds, but | |
276 | may be a copy of (uin, uout). The returned streams can be considered |

277 | may be a copy of (uin, uout). The returned streams can be considered | |
277 | "owned" in that print(), exec(), etc. never reach them. |

278 | "owned" in that print(), exec(), etc. never reach them. | |
278 | """ |
|
279 | """ | |
279 | uout.flush() |
|
280 | uout.flush() | |
280 | fin, fout = uin, uout |
|
281 | fin, fout = uin, uout | |
281 | if _testfileno(uin, stdin): |
|
282 | if _testfileno(uin, stdin): | |
282 | newfd = os.dup(uin.fileno()) |
|
283 | newfd = os.dup(uin.fileno()) | |
283 | nullfd = os.open(os.devnull, os.O_RDONLY) |
|
284 | nullfd = os.open(os.devnull, os.O_RDONLY) | |
284 | os.dup2(nullfd, uin.fileno()) |
|
285 | os.dup2(nullfd, uin.fileno()) | |
285 | os.close(nullfd) |
|
286 | os.close(nullfd) | |
286 | fin = os.fdopen(newfd, r'rb') |
|
287 | fin = os.fdopen(newfd, r'rb') | |
287 | if _testfileno(uout, stdout): |
|
288 | if _testfileno(uout, stdout): | |
288 | newfd = os.dup(uout.fileno()) |
|
289 | newfd = os.dup(uout.fileno()) | |
289 | os.dup2(stderr.fileno(), uout.fileno()) |
|
290 | os.dup2(stderr.fileno(), uout.fileno()) | |
290 | fout = os.fdopen(newfd, r'wb') |
|
291 | fout = os.fdopen(newfd, r'wb') | |
291 | return fin, fout |
|
292 | return fin, fout | |
292 |
|
293 | |||
def restorestdio(uin, uout, fin, fout):
    """Restore (uin, uout) streams from possibly duplicated (fin, fout)"""
    uout.flush()
    for f, uif in [(fin, uin), (fout, uout)]:
        if f is not uif:
            os.dup2(f.fileno(), uif.fileno())
            f.close()

@contextlib.contextmanager
def protectedstdio(uin, uout):
    """Run code block with protected standard streams"""
    fin, fout = protectstdio(uin, uout)
    try:
        yield fin, fout
    finally:
        restorestdio(uin, uout, fin, fout)

def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    return env

if pycompat.iswindows:
    def shelltonative(cmd, env):
        return platform.shelltocmdexe(cmd, shellenviron(env))

    tonativestr = encoding.strfromlocal
else:
    def shelltonative(cmd, env):
        return cmd

    tonativestr = pycompat.identity

def tonativeenv(env):
    '''convert the environment from bytes to strings suitable for Popen(), etc.
    '''
    return pycompat.rapply(tonativestr, env)

def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or isstdout(out):
        rc = subprocess.call(tonativestr(cmd),
                             shell=True, close_fds=closefds,
                             env=tonativeenv(env),
                             cwd=pycompat.rapply(tonativestr, cwd))
    else:
        proc = subprocess.Popen(tonativestr(cmd),
                                shell=True, close_fds=closefds,
                                env=tonativeenv(env),
                                cwd=pycompat.rapply(tonativestr, cwd),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc

def gui():
    '''Are we running in a GUI?'''
    if pycompat.isdarwin:
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return pycompat.iswindows or encoding.environ.get("DISPLAY")

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    return _gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

@contextlib.contextmanager
def uninterruptable(warn):
    """Inhibit SIGINT handling on a region of code.

    Note that if this is called in a non-main thread, it turns into a no-op.

    Args:
      warn: A callable which takes no arguments, and returns True if the
            previous signal handling should be restored.
    """

    oldsiginthandler = [signal.getsignal(signal.SIGINT)]
    shouldbail = []

    def disabledsiginthandler(*args):
        if warn():
            signal.signal(signal.SIGINT, oldsiginthandler[0])
            del oldsiginthandler[0]
        shouldbail.append(True)

    try:
        try:
            signal.signal(signal.SIGINT, disabledsiginthandler)
        except ValueError:
            # wrong thread, oh well, we tried
            del oldsiginthandler[0]
        yield
    finally:
        if oldsiginthandler:
            signal.signal(signal.SIGINT, oldsiginthandler[0])
        if shouldbail:
            raise KeyboardInterrupt

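Hypothetical usage of uninterruptable(): the first Ctrl-C only triggers the warning, and because shouldbail is then non-empty, the KeyboardInterrupt is re-raised once the protected block finishes. The warn callable and the guarded work are illustrative names, not part of this module:

def warn():
    print('interrupt held off; press Ctrl-C again to abort')
    return True  # restore the previous handler so a second ^C lands

with uninterruptable(warn):
    critical_repository_update()  # hypothetical critical section
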
if pycompat.iswindows:
    # no fork on Windows, but we can create a detached process
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
    # No stdlib constant exists for this value
    DETACHED_PROCESS = 0x00000008
    _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP

    def runbgcommand(script, env, shell=False, stdout=None, stderr=None):
        '''Spawn a command without waiting for it to finish.'''
        # we can't use close_fds *and* redirect stdin. I'm not sure that we
        # need to because the detached process has no console connection.
        subprocess.Popen(
            script, shell=shell, env=env, close_fds=True,
            creationflags=_creationflags, stdout=stdout, stderr=stderr)
else:
    def runbgcommand(cmd, env, shell=False, stdout=None, stderr=None):
        '''Spawn a command without waiting for it to finish.'''
        # double-fork to completely detach from the parent process
        # based on http://code.activestate.com/recipes/278731
        pid = os.fork()
        if pid:
            # Parent process
            (_pid, status) = os.waitpid(pid, 0)
            if os.WIFEXITED(status):
                returncode = os.WEXITSTATUS(status)
            else:
                returncode = -os.WTERMSIG(status)
            if returncode != 0:
                # The child process's return code is 0 on success, an errno
                # value on failure, or 255 if we don't have a valid errno
                # value.
                #
                # (It would be slightly nicer to return the full exception info
                # over a pipe as the subprocess module does. For now it
                # doesn't seem worth adding that complexity here, though.)
                if returncode == 255:
                    returncode = errno.EINVAL
                raise OSError(returncode, 'error running %r: %s' %
                              (cmd, os.strerror(returncode)))
            return

        returncode = 255
        try:
            # Start a new session
            os.setsid()

            stdin = open(os.devnull, 'r')
            if stdout is None:
                stdout = open(os.devnull, 'w')
            if stderr is None:
                stderr = open(os.devnull, 'w')

            # connect stdin to devnull to make sure the subprocess can't
            # muck up that stream for mercurial.
            subprocess.Popen(
                cmd, shell=shell, env=env, close_fds=True,
                stdin=stdin, stdout=stdout, stderr=stderr)
            returncode = 0
        except EnvironmentError as ex:
            returncode = (ex.errno & 0xff)
            if returncode == 0:
                # This shouldn't happen, but just in case make sure the
                # return code is never 0 here.
                returncode = 255
        except Exception:
            returncode = 255
        finally:
            # mission accomplished, this child needs to exit and not
            # continue the hg process here.
            os._exit(returncode)
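
Hypothetical invocation of runbgcommand(), assuming the module imports as mercurial.utils.procutil; the background command is illustrative. On POSIX the double fork means the call returns as soon as the intermediate child has spawned the grandchild, which keeps running in its own session:

from mercurial.utils import procutil

env = procutil.shellenviron()
# fire and forget: raises OSError only if the spawn itself fails
procutil.runbgcommand(b'hg log -l 1 > /tmp/hg-bg.log', env, shell=True)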