@@ -317,7 +317,7 @@ paths::
   Assigns symbolic names to repositories. The left side is the
   symbolic name, and the right gives the directory or URL that is the
   location of the repository. Default paths can be declared by
   setting the following entries.
   default;;
     Directory or URL to use when pulling if no source is specified.
     Default is set to repository from which the current repository
@@ -326,6 +326,18 @@ paths::
     Optional. Directory or URL to use when pushing if no destination
     is specified.
 
+server::
+  Controls generic server settings.
+  uncompressed;;
+    Whether to allow clients to clone a repo using the uncompressed
+    streaming protocol. This transfers about 40% more data than a
+    regular clone, but uses less memory and CPU on both server and
+    client. Over a LAN (100Mbps or better) or a very fast WAN, an
+    uncompressed streaming clone is a lot faster (~10x) than a regular
+    clone. Over most WAN connections (anything slower than about
+    6Mbps), uncompressed streaming is slower, because of the extra
+    data transfer overhead. Default is False.
+
 ui::
   User interface controls.
   debug;;
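For reference, the server-side switch documented above is what the tests further down enable, either on the command line ('hg --config server.uncompressed=True serve ...') or with an hgrc stanza like the one the ssh test writes into .hg/hgrc:

    [server]
    uncompressed = True

Clients still have to ask for the uncompressed transfer explicitly, via the clone option renamed below from --stream to --uncompressed.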
@@ -128,13 +128,17 @@ def walkchangerevs(ui, repo, pats, opts)
     if not slowpath:
         # Only files, no patterns. Check the history of each file.
         def filerevgen(filelog):
+            cl_count = repo.changelog.count()
             for i, window in increasing_windows(filelog.count()-1, -1):
                 revs = []
                 for j in xrange(i - window, i + 1):
                     revs.append(filelog.linkrev(filelog.node(j)))
                 revs.reverse()
                 for rev in revs:
-                    yield rev
+                    # only yield rev for which we have the changelog, it can
+                    # happen while doing "hg log" during a pull or commit
+                    if rev < cl_count:
+                        yield rev
 
         minrev, maxrev = min(revs), max(revs)
         for file_ in files:
@@ -970,7 +974,7 @@ def clone(ui, source, dest=None, **opts)
     ui.setconfig_remoteopts(**opts)
     hg.clone(ui, ui.expandpath(source), dest,
              pull=opts['pull'],
-             stream=opts['stream'],
+             stream=opts['uncompressed'],
              rev=opts['rev'],
              update=not opts['noupdate'])
 
@@ -2863,7 +2867,8 @@ table = {
          ('r', 'rev', [],
           _('a changeset you would like to have after cloning')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
-         ('', 'stream', None, _('use streaming protocol (fast over LAN)')),
+         ('', 'uncompressed', None,
+          _('use uncompressed transfer (fast over LAN)')),
          ('e', 'ssh', '', _('specify ssh command to use')),
          ('', 'remotecmd', '',
           _('specify hg command to run on the remote side'))],
@@ -3322,7 +3327,7 @@ def dispatch(args):
         except (util.SignalInterrupt, KeyboardInterrupt):
             raise
         except Exception, inst:
-            u.warn(_("*** failed to import extension %s: %s\n") % (
+            u.warn(_("*** failed to import extension %s: %s\n") % (ext_name, inst))
             if u.print_exc():
                 return 1
 
@@ -3513,7 +3518,9 @@ def dispatch(args):
         return inst.code
     except:
         u.warn(_("** unknown exception encountered, details follow\n"))
-        u.warn(_("** report bug details to mercurial@selenic.com\n"))
+        u.warn(_("** report bug details to "
+                 "http://www.selenic.com/mercurial/bts\n"))
+        u.warn(_("** or mercurial@selenic.com\n"))
         u.warn(_("** Mercurial Distributed SCM (version %s)\n")
               % version.get_version())
         raise
@@ -39,21 +39,23 @@ class changectx(object):
 
     def parents(self):
         """return contexts for each parent changeset"""
-        p = self.repo.changelog.parents(self._node)
+        p = self._repo.changelog.parents(self._node)
         return [ changectx(self._repo, x) for x in p ]
 
     def children(self):
         """return contexts for each child changeset"""
-        c = self.repo.changelog.children(self._node)
+        c = self._repo.changelog.children(self._node)
         return [ changectx(self._repo, x) for x in c ]
 
     def filenode(self, path):
         node, flag = self._repo.manifest.find(self.changeset()[0], path)
         return node
 
-    def filectx(self, path):
+    def filectx(self, path, fileid=None):
         """get a file context from this changeset"""
-        return filectx(self._repo, path, fileid=self.filenode(path))
+        if fileid is None:
+            fileid = self.filenode(path)
+        return filectx(self._repo, path, fileid=fileid)
 
     def filectxs(self):
         """generate a file context for each file in this changeset's
@@ -77,10 +79,10 @@ class filectx(object):
 
         if self._id:
             # if given a changeset id, go ahead and look up the file
-            self._changeset = change
+            self._changeset = self._repo.changelog.read(self._id)
             node, flag = self._repo.manifest.find(self._changeset[0], path)
-            self._node = node
-            self._file
+            self._filelog = self._repo.file(self._path)
+            self._filenode = node
         elif self._fileid:
             # else be lazy
             self._filelog = self._repo.file(self._path)
@@ -97,7 +97,8 @@ def clone(ui, source, dest=None, pull=Fa
 
     pull: always pull from source repository, even in local case
 
-    stream: stream from repository (fast over LAN)
+    stream: stream raw data uncompressed from repository (fast over
+    LAN, slow over WAN)
 
     rev: revision to clone up to (implies pull=True)
 
@@ -156,9 +157,9 @@ def clone(ui, source, dest=None, pull=Fa
         # we lock here to avoid premature writing to the target
         dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
 
-
-
-
+        # we need to remove the (empty) data dir in dest so copyfiles
+        # can do its work
+        os.rmdir(os.path.join(dest_path, ".hg", "data"))
         files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
         for f in files.split():
             src = os.path.join(source, ".hg", f)
@@ -169,8 +170,8 @@ def clone(ui, source, dest=None, pull=Fa
                 if inst.errno != errno.ENOENT:
                     raise
 
-
-
+        # we need to re-init the repo after manually copying the data
+        # into it
         dest_repo = repository(ui, dest)
 
     else:
@@ -860,7 +860,10 @@ class hgweb(object):
                     or self.t("error", error="%r not found" % fname))
 
     def do_capabilities(self, req):
-        resp = 'unbundle stream=%d' % (self.repo.revlogversion,)
+        caps = ['unbundle']
+        if self.repo.ui.configbool('server', 'uncompressed'):
+            caps.append('stream=%d' % self.repo.revlogversion)
+        resp = ' '.join(caps)
         req.httphdr("application/mercurial-0.1", length=len(resp))
         req.write(resp)
 
@@ -2204,8 +2204,11 @@ class localrepository(repo.repository):
             return 1
 
     def stream_in(self, remote):
+        fp = remote.stream_out()
+        resp = int(fp.readline())
+        if resp != 0:
+            raise util.Abort(_('operation forbidden by server'))
         self.ui.status(_('streaming all changes\n'))
-        fp = remote.stream_out()
         total_files, total_bytes = map(int, fp.readline().split(' ', 1))
         self.ui.status(_('%d files to transfer, %s of data\n') %
                        (total_files, util.bytecount(total_bytes)))
@@ -2230,14 +2233,15 @@ class localrepository(repo.repository):
 
         keyword arguments:
         heads: list of revs to clone (forces use of pull)
-        pull: force use of pull, even if remote can stream'''
+        stream: use streaming clone if possible'''
 
-        # now, all clients that can
-        # supported by all servers that can s
+        # now, all clients that can request uncompressed clones can
+        # read repo formats supported by all servers that can serve
+        # them.
 
         # if revlog format changes, client will have to check version
-        # and format flags on "stream" capability, and s
-        # compatible.
+        # and format flags on "stream" capability, and use
+        # uncompressed only if compatible.
 
         if stream and not heads and remote.capable('stream'):
             return self.stream_in(remote)
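In short, the condition above means a clone uses the uncompressed stream only when the caller asked for it, no specific heads were requested, and the remote advertises the 'stream' capability; otherwise it silently falls back to a regular pull, which is what the '% try to clone via stream, should use pull instead' test below exercises. A rough illustrative sketch of that decision (the helper name and capability matching are assumptions, not Mercurial API):

    def pick_clone_path(stream_requested, heads, caps):
        # caps: capability tokens as advertised by the server
        can_stream = any(c == 'stream' or c.startswith('stream=') for c in caps)
        if stream_requested and not heads and can_stream:
            return 'stream_in'   # uncompressed streaming clone
        return 'pull'            # regular changegroup-based clone

    assert pick_clone_path(True, [], ['unbundle', 'stream=0']) == 'stream_in'
    assert pick_clone_path(True, [], ['unbundle']) == 'pull'
    assert pick_clone_path(True, ['some-rev'], ['unbundle', 'stream=0']) == 'pull'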
@@ -60,8 +60,10 @@ class sshserver(object):
         capabilities: space separated list of tokens
         '''
 
-        r = "capabilities: unbundle stream=%d\n" % (self.repo.revlogversion,)
-        self.respond(r)
+        caps = ['unbundle']
+        if self.ui.configbool('server', 'uncompressed'):
+            caps.append('stream=%d' % self.repo.revlogversion)
+        self.respond("capabilities: %s\n" % (' '.join(caps),))
 
     def do_lock(self):
         '''DEPRECATED - allowing remote client to lock repo is not safe'''
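Both the HTTP and ssh servers now build the same capability string and advertise 'stream' only when server.uncompressed is on (the ssh responder additionally prefixes it with 'capabilities: '). A standalone sketch mirroring that logic, assuming revlogversion 0 for illustration:

    def server_caps(uncompressed_allowed, revlogversion=0):
        # mirrors the caps-building code above; not the actual classes
        caps = ['unbundle']
        if uncompressed_allowed:
            caps.append('stream=%d' % revlogversion)
        return ' '.join(caps)

    print(server_caps(True))     # unbundle stream=0
    print(server_caps(False))    # unbundle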
@@ -40,7 +40,8 @@ def walkrepo(root):
         yield x
     # write manifest before changelog
     meta = list(walk(root, False))
-    meta.sort(
+    meta.sort()
+    meta.reverse()
     for x in meta:
         yield x
 
@@ -59,6 +60,13 @@ def walkrepo(root):
 def stream_out(repo, fileobj):
     '''stream out all metadata files in repository.
     writes to file-like object, must support write() and optional flush().'''
+
+    if not repo.ui.configbool('server', 'uncompressed'):
+        fileobj.write('1\n')
+        return
+
+    fileobj.write('0\n')
+
     # get consistent snapshot of repo. lock during scan so lock not
     # needed while we stream, and commits can happen.
     lock = repo.lock()
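Taken together with the stream_in() change earlier, the first line of the stream_out response is now a status code: '1\n' when the server refuses (server.uncompressed off), or '0\n' followed by the 'total_files total_bytes' header that stream_in reads next. A self-contained sketch of just that handshake, with the file payload left out:

    # Illustrative only: models the status line and header, not the file data.
    def serve_stream(allowed, total_files=0, total_bytes=0):
        if not allowed:
            return ['1\n']                      # operation forbidden by server
        return ['0\n', '%d %d\n' % (total_files, total_bytes)]

    def read_stream_header(lines):
        if int(lines[0]) != 0:
            raise Exception('operation forbidden by server')
        total_files, total_bytes = map(int, lines[1].split(' ', 1))
        return total_files, total_bytes

    print(read_stream_header(serve_stream(True, 3, 1024)))   # (3, 1024)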
@@ -209,7 +209,7 @@ class ui(object):
 
     def expandpath(self, loc, default=None):
         """Return repository location relative to cwd or from [paths]"""
-        if "://" in loc or os.path.
+        if "://" in loc or os.path.isdir(loc):
             return loc
 
         path = self.config("paths", loc)
@@ -29,3 +29,23 @@ changeset 3:4cbb1e70196a backs out chang
 the backout changeset is a new head - do not forget to merge
 (use "backout -m" if you want to auto-merge)
 b: No such file or directory
+adding a
+adding b
+adding c
+0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+adding d
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+(branch merge, don't forget to commit)
+# backout of merge should fail
+abort: cannot back out a merge changeset without --parent
+# backout of merge with bad parent should fail
+abort: cb9a9f314b8b is not a parent of b2f3bb92043e
+# backout of non-merge with parent should fail
+abort: cannot use --parent on non-merge changeset
+# backout with valid parent should be ok
+removing d
+changeset 5:11fbd9be634c backs out changeset 4:b2f3bb92043e
+rolling back last transaction
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+removing c
+changeset 5:1a5f1a63bf2c backs out changeset 4:b2f3bb92043e
@@ -1,23 +1,23 @@
 #!/bin/sh
 
-mkdir test
+hg init test
 cd test
 echo foo>foo
-hg init
-hg addremove
-hg commit -m 1
-hg verify
-hg serve -p 20059 -d --pid-file=hg.pid
-cat hg.pid >> $DAEMON_PIDS
+hg commit -A -d '0 0' -m 1
+hg --config server.uncompressed=True serve -p 20059 -d --pid-file=hg1.pid
+cat hg1.pid >> $DAEMON_PIDS
+hg serve -p 20060 -d --pid-file=hg2.pid
+cat hg2.pid >> $DAEMON_PIDS
 cd ..
 
 echo % clone via stream
-http_proxy= hg clone --stream http://localhost:20059/ copy 2>&1 | \
+http_proxy= hg clone --uncompressed http://localhost:20059/ copy 2>&1 | \
   sed -e 's/[0-9][0-9.]*/XXX/g'
 cd copy
 hg verify
 
-cd ..
+echo % try to clone via stream, should use pull instead
+http_proxy= hg clone --uncompressed http://localhost:20060/ copy2
 
 echo % clone via pull
 http_proxy= hg clone http://localhost:20059/ copy-pull
@@ -4,7 +4,7 @@ hg init a
 cd a
 echo a > a
 hg ci -Ama -d '1123456789 0'
-hg serve -p 20059 -d --pid-file=hg.pid
+hg --config server.uncompressed=True serve -p 20059 -d --pid-file=hg.pid
 cat hg.pid >> $DAEMON_PIDS
 
 cd ..
@@ -14,7 +14,7 @@ cat proxy.pid >> $DAEMON_PIDS
 sleep 2
 
 echo %% url for proxy, stream
-http_proxy=http://localhost:20060/ hg --config http_proxy.always=True clone --stream http://localhost:20059/ b | \
+http_proxy=http://localhost:20060/ hg --config http_proxy.always=True clone --uncompressed http://localhost:20059/ b | \
   sed -e 's/[0-9][0-9.]*/XXX/g'
 cd b
 hg verify
@@ -1,10 +1,4 @@
-(the addremove command is deprecated; use add and remove --after instead)
 adding foo
-checking changesets
-checking manifests
-crosschecking files in changesets and manifests
-checking files
-1 files, 1 changesets, 1 total revisions
 % clone via stream
 streaming all changes
 XXX files to transfer, XXX bytes of data
@@ -15,6 +9,13 @@ checking manifests
 crosschecking files in changesets and manifests
 checking files
 1 files, 1 changesets, 1 total revisions
+% try to clone via stream, should use pull instead
+requesting all changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 % clone via pull
 requesting all changes
 adding changesets
@@ -27,11 +27,13 @@ hg init remote
 cd remote
 echo this > foo
 hg ci -A -m "init" -d "1000000 0" foo
+echo '[server]' > .hg/hgrc
+echo 'uncompressed = True' >> .hg/hgrc
 
 cd ..
 
 echo "# clone remote via stream"
-hg clone -e ./dummyssh --stream ssh://user@dummy/remote local-stream 2>&1 | \
+hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
   sed -e 's/[0-9][0-9.]*/XXX/g'
 cd local-stream
 hg verify