switch to the .hg/store layout, fix the tests
Benoit Boissinot
r3853:c0b44915 default
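In short: this changeset moves a repository's revlogs from directly under .hg/ into a dedicated .hg/store/ subdirectory, records the new layout via a "store" entry in .hg/requires, and encodes store filenames on the way to disk. A rough sketch of the two layouts (illustrative only, inferred from the diffs below; not part of the changeset):

import os

def revlog_dir(repo_root, uses_store):
    # old layout: .hg/00changelog.i, .hg/00manifest.i, .hg/data/...
    # new layout: the same files, one level down in .hg/store/
    hgdir = os.path.join(repo_root, ".hg")
    return uses_store and os.path.join(hgdir, "store") or hgdir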
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,266 +1,270 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import *
 from repo import *
 from demandload import *
 from i18n import gettext as _
 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")

 def _local(path):
     return (os.path.isfile(util.drop_scheme('file', path)) and
             bundlerepo or localrepo)

 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'hg': httprepo,
     'http': httprepo,
     'https': httprepo,
     'old-http': statichttprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
     }

 def _lookup(path):
     scheme = 'file'
     if path:
         c = path.find(':')
         if c > 0:
             scheme = path[:c]
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing

 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _lookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 repo_setup_hooks = []

 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _lookup(path).instance(ui, path, create)
     for hook in repo_setup_hooks:
         hook(ui, repo)
     return repo

 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))

 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
           stream=False):
     """Make a copy of an existing repository.

     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository objects, the source and
     newly created destination.

     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.

     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.

     Arguments:

     source: repository object or URL

     dest: URL of destination repository to create (defaults to base
     name of source repository)

     pull: always pull from source repository, even in local case

     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)

     rev: revision to clone up to (implies pull=True)

     update: update working directory after clone completes, if
     destination is local repository
     """
     if isinstance(source, str):
         src_repo = repository(ui, source)
     else:
         src_repo = source
         source = src_repo.url()

     if dest is None:
         dest = defaultdest(source)

     def localpath(path):
         if path.startswith('file://'):
             return path[7:]
         if path.startswith('file:'):
             return path[5:]
         return path

     dest = localpath(dest)
     source = localpath(source)

     if os.path.exists(dest):
         raise util.Abort(_("destination '%s' already exists") % dest)

     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def __del__(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)

     dir_cleanup = None
     if islocal(dest):
         dir_cleanup = DirCleanup(dest)

     abspath = source
     copy = False
     if src_repo.local() and islocal(dest):
         abspath = os.path.abspath(source)
         copy = not pull and not rev

     src_lock, dest_lock = None, None
     if copy:
         try:
             # we use a lock here because if we race with commit, we
             # can end up with extra data in the cloned revlogs that's
             # not pointed to by changesets, thus causing verify to
             # fail
             src_lock = src_repo.lock()
         except lock.LockException:
             copy = False

     if copy:
         def force_copy(src, dst):
             try:
                 util.copyfiles(src, dst)
             except OSError, inst:
                 if inst.errno != errno.ENOENT:
                     raise

         src_store = os.path.realpath(src_repo.spath)
-        dest_path = os.path.realpath(os.path.join(dest, ".hg"))
-        dest_store = dest_path
         if not os.path.exists(dest):
             os.mkdir(dest)
+        dest_path = os.path.realpath(os.path.join(dest, ".hg"))
         os.mkdir(dest_path)
+        if src_repo.spath != src_repo.path:
+            dest_store = os.path.join(dest_path, "store")
+            os.mkdir(dest_store)
+        else:
+            dest_store = dest_path
         # copy the requires file
         force_copy(src_repo.join("requires"),
                    os.path.join(dest_path, "requires"))
         # we lock here to avoid premature writing to the target
         dest_lock = lock.lock(os.path.join(dest_store, "lock"))

         files = ("data",
                  "00manifest.d", "00manifest.i",
                  "00changelog.d", "00changelog.i")
         for f in files:
             src = os.path.join(src_store, f)
             dst = os.path.join(dest_store, f)
             force_copy(src, dst)

         # we need to re-init the repo after manually copying the data
         # into it
         dest_repo = repository(ui, dest)

     else:
         dest_repo = repository(ui, dest, create=True)

     revs = None
     if rev:
         if 'lookup' not in src_repo.capabilities:
             raise util.Abort(_("src repository does not support revision "
                                "lookup and so doesn't support clone by "
                                "revision"))
         revs = [src_repo.lookup(r) for r in rev]

     if dest_repo.local():
         dest_repo.clone(src_repo, heads=revs, stream=stream)
     elif src_repo.local():
         src_repo.push(dest_repo, revs=revs)
     else:
         raise util.Abort(_("clone from remote to remote not supported"))

     if src_lock:
         src_lock.release()

     if dest_repo.local():
         fp = dest_repo.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % abspath)
         fp.close()

     if dest_lock:
         dest_lock.release()

     if update:
         _update(dest_repo, dest_repo.changelog.tip())
     if dir_cleanup:
         dir_cleanup.close()

     return src_repo, dest_repo

 def _showstats(repo, stats):
     stats = ((stats[0], _("updated")),
              (stats[1], _("merged")),
              (stats[2], _("removed")),
              (stats[3], _("unresolved")))
     note = ", ".join([_("%d files %s") % s for s in stats])
     repo.ui.status("%s\n" % note)

 def _update(repo, node): return update(repo, node)

 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = _merge.update(repo, node, False, False, None, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("There are unresolved merges with"
                          " locally modified files.\n"))
     return stats[3]

 def clean(repo, node, wlock=None, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = _merge.update(repo, node, False, True, None, wlock)
     if show_stats: _showstats(repo, stats)
     return stats[3]

 def merge(repo, node, force=None, remind=True, wlock=None):
     """branch merge with node, resolving changes"""
     stats = _merge.update(repo, node, True, force, False, wlock)
     _showstats(repo, stats)
     if stats[3]:
         pl = repo.parents()
         repo.ui.status(_("There are unresolved merges,"
                          " you can redo the full merge using:\n"
                          "  hg update -C %s\n"
                          "  hg merge %s\n")
                        % (pl[0].rev(), pl[1].rev()))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3]

 def revert(repo, node, choose, wlock):
     """revert changes to revision in node without updating dirstate"""
     return _merge.update(repo, node, False, True, choose, wlock)[3]

 def verify(repo):
     """verify the consistency of a repository"""
     return _verify.verify(repo)
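The functional hunk in hg.py sits in the hardlink/copy branch of clone(): the destination now mirrors the source's layout. A condensed restatement of that decision as a standalone sketch (hypothetical helper; it assumes, as the hunk does, that spath differs from path exactly when the repository has a separate store):

import os

def dest_store_dir(src_repo, dest):
    # src_repo.path is the .hg directory, src_repo.spath the store;
    # they differ exactly when the source uses the "store" layout
    dest_path = os.path.realpath(os.path.join(dest, ".hg"))
    if src_repo.spath != src_repo.path:
        return os.path.join(dest_path, "store")
    return dest_path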
--- a/mercurial/hgweb/common.py
+++ b/mercurial/hgweb/common.py
@@ -1,61 +1,63 @@
 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
 #
 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os, mimetypes
 import os.path

 def get_mtime(repo_path):
-    hg_path = os.path.join(repo_path, ".hg")
-    cl_path = os.path.join(hg_path, "00changelog.i")
-    if os.path.exists(os.path.join(cl_path)):
+    store_path = os.path.join(repo_path, ".hg")
+    if not os.path.isdir(os.path.join(store_path, "data")):
+        store_path = os.path.join(store_path, "store")
+    cl_path = os.path.join(store_path, "00changelog.i")
+    if os.path.exists(cl_path):
         return os.stat(cl_path).st_mtime
     else:
-        return os.stat(hg_path).st_mtime
+        return os.stat(store_path).st_mtime

 def staticfile(directory, fname, req):
     """return a file inside directory with guessed content-type header

     fname always uses '/' as directory separator and isn't allowed to
     contain unusual path components.
     Content-type is guessed using the mimetypes module.
     Return an empty string if fname is illegal or file not found.

     """
     parts = fname.split('/')
     path = directory
     for part in parts:
         if (part in ('', os.curdir, os.pardir) or
             os.sep in part or os.altsep is not None and os.altsep in part):
             return ""
         path = os.path.join(path, part)
     try:
         os.stat(path)
         ct = mimetypes.guess_type(path)[0] or "text/plain"
         req.header([('Content-type', ct),
                     ('Content-length', os.path.getsize(path))])
         return file(path, 'rb').read()
     except (TypeError, OSError):
         # illegal fname or unreadable file
         return ""

 def style_map(templatepath, style):
     """Return path to mapfile for a given style.

     Searches mapfile in the following locations:
     1. templatepath/style/map
     2. templatepath/map-style
     3. templatepath/map
     """
     locations = style and [os.path.join(style, "map"), "map-"+style] or []
     locations.append("map")
     for location in locations:
         mapfile = os.path.join(templatepath, location)
         if os.path.isfile(mapfile):
             return mapfile
     raise RuntimeError("No hgweb templates found in %r" % templatepath)
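Since hgweb may serve repositories created before the switch, get_mtime() above probes for the layout rather than assuming one: if .hg/data is missing, the revlogs are taken to live under .hg/store. The same probe as a standalone sketch (hypothetical helper name):

import os

def locate_store(repo_path):
    # an old-layout repository keeps its revlogs (including the "data"
    # directory) directly under .hg; a store-layout one under .hg/store
    path = os.path.join(repo_path, ".hg")
    if not os.path.isdir(os.path.join(path, "data")):
        path = os.path.join(path, "store")
    return path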
@@ -1,1947 +1,1956 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1',)
19 supported = ('revlogv1', 'store')
20
20
21 def __del__(self):
21 def __del__(self):
22 self.transhandle = None
22 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 if not path:
25 if not path:
26 p = os.getcwd()
26 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
28 oldp = p
29 p = os.path.dirname(p)
29 p = os.path.dirname(p)
30 if p == oldp:
30 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
32 " here (.hg not found)"))
33 path = p
33 path = p
34
34
35 self.path = os.path.join(path, ".hg")
35 self.path = os.path.join(path, ".hg")
36 self.root = os.path.realpath(path)
36 self.root = os.path.realpath(path)
37 self.origroot = path
37 self.origroot = path
38 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 #if self.spath != self.path:
46 os.mkdir(os.path.join(self.path, "store"))
47 # os.mkdir(self.spath)
47 requirements = ("revlogv1", "store")
48 requirements = ("revlogv1",)
49 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
50 for r in requirements:
49 for r in requirements:
51 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
52 reqfile.close()
51 reqfile.close()
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write('\0\0\0\2')
53 else:
54 else:
54 raise repo.RepoError(_("repository %s not found") % path)
55 raise repo.RepoError(_("repository %s not found") % path)
55 elif create:
56 elif create:
56 raise repo.RepoError(_("repository %s already exists") % path)
57 raise repo.RepoError(_("repository %s already exists") % path)
57 else:
58 else:
58 # find requirements
59 # find requirements
59 try:
60 try:
60 requirements = self.opener("requires").read().splitlines()
61 requirements = self.opener("requires").read().splitlines()
61 except IOError, inst:
62 except IOError, inst:
62 if inst.errno != errno.ENOENT:
63 if inst.errno != errno.ENOENT:
63 raise
64 raise
64 requirements = []
65 requirements = []
65 # check them
66 # check them
66 for r in requirements:
67 for r in requirements:
67 if r not in self.supported:
68 if r not in self.supported:
68 raise repo.RepoError(_("requirement '%s' not supported") % r)
69 raise repo.RepoError(_("requirement '%s' not supported") % r)
69
70
70 # setup store
71 # setup store
72 if "store" in requirements:
73 self.encodefn = util.encodefilename
74 self.decodefn = util.decodefilename
75 self.spath = os.path.join(self.path, "store")
76 else:
77 self.encodefn = lambda x: x
78 self.decodefn = lambda x: x
71 self.spath = self.path
79 self.spath = self.path
72 self.sopener = util.opener(self.spath)
80 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
73
81
74 self.ui = ui.ui(parentui=parentui)
82 self.ui = ui.ui(parentui=parentui)
75 try:
83 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
84 self.ui.readconfig(self.join("hgrc"), self.root)
77 except IOError:
85 except IOError:
78 pass
86 pass
79
87
80 v = self.ui.configrevlog()
88 v = self.ui.configrevlog()
81 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
89 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
82 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
90 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
83 fl = v.get('flags', None)
91 fl = v.get('flags', None)
84 flags = 0
92 flags = 0
85 if fl != None:
93 if fl != None:
86 for x in fl.split():
94 for x in fl.split():
87 flags |= revlog.flagstr(x)
95 flags |= revlog.flagstr(x)
88 elif self.revlogv1:
96 elif self.revlogv1:
89 flags = revlog.REVLOG_DEFAULT_FLAGS
97 flags = revlog.REVLOG_DEFAULT_FLAGS
90
98
91 v = self.revlogversion | flags
99 v = self.revlogversion | flags
92 self.manifest = manifest.manifest(self.sopener, v)
100 self.manifest = manifest.manifest(self.sopener, v)
93 self.changelog = changelog.changelog(self.sopener, v)
101 self.changelog = changelog.changelog(self.sopener, v)
94
102
95 # the changelog might not have the inline index flag
103 # the changelog might not have the inline index flag
96 # on. If the format of the changelog is the same as found in
104 # on. If the format of the changelog is the same as found in
97 # .hgrc, apply any flags found in the .hgrc as well.
105 # .hgrc, apply any flags found in the .hgrc as well.
98 # Otherwise, just version from the changelog
106 # Otherwise, just version from the changelog
99 v = self.changelog.version
107 v = self.changelog.version
100 if v == self.revlogversion:
108 if v == self.revlogversion:
101 v |= flags
109 v |= flags
102 self.revlogversion = v
110 self.revlogversion = v
103
111
104 self.tagscache = None
112 self.tagscache = None
105 self.branchcache = None
113 self.branchcache = None
106 self.nodetagscache = None
114 self.nodetagscache = None
107 self.encodepats = None
115 self.encodepats = None
108 self.decodepats = None
116 self.decodepats = None
109 self.transhandle = None
117 self.transhandle = None
110
118
111 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
119 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
112
120
113 def url(self):
121 def url(self):
114 return 'file:' + self.root
122 return 'file:' + self.root
115
123
116 def hook(self, name, throw=False, **args):
124 def hook(self, name, throw=False, **args):
117 def callhook(hname, funcname):
125 def callhook(hname, funcname):
118 '''call python hook. hook is callable object, looked up as
126 '''call python hook. hook is callable object, looked up as
119 name in python module. if callable returns "true", hook
127 name in python module. if callable returns "true", hook
120 fails, else passes. if hook raises exception, treated as
128 fails, else passes. if hook raises exception, treated as
121 hook failure. exception propagates if throw is "true".
129 hook failure. exception propagates if throw is "true".
122
130
123 reason for "true" meaning "hook failed" is so that
131 reason for "true" meaning "hook failed" is so that
124 unmodified commands (e.g. mercurial.commands.update) can
132 unmodified commands (e.g. mercurial.commands.update) can
125 be run as hooks without wrappers to convert return values.'''
133 be run as hooks without wrappers to convert return values.'''
126
134
127 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
135 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
128 d = funcname.rfind('.')
136 d = funcname.rfind('.')
129 if d == -1:
137 if d == -1:
130 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
138 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
131 % (hname, funcname))
139 % (hname, funcname))
132 modname = funcname[:d]
140 modname = funcname[:d]
133 try:
141 try:
134 obj = __import__(modname)
142 obj = __import__(modname)
135 except ImportError:
143 except ImportError:
136 try:
144 try:
137 # extensions are loaded with hgext_ prefix
145 # extensions are loaded with hgext_ prefix
138 obj = __import__("hgext_%s" % modname)
146 obj = __import__("hgext_%s" % modname)
139 except ImportError:
147 except ImportError:
140 raise util.Abort(_('%s hook is invalid '
148 raise util.Abort(_('%s hook is invalid '
141 '(import of "%s" failed)') %
149 '(import of "%s" failed)') %
142 (hname, modname))
150 (hname, modname))
143 try:
151 try:
144 for p in funcname.split('.')[1:]:
152 for p in funcname.split('.')[1:]:
145 obj = getattr(obj, p)
153 obj = getattr(obj, p)
146 except AttributeError, err:
154 except AttributeError, err:
147 raise util.Abort(_('%s hook is invalid '
155 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not defined)') %
156 '("%s" is not defined)') %
149 (hname, funcname))
157 (hname, funcname))
150 if not callable(obj):
158 if not callable(obj):
151 raise util.Abort(_('%s hook is invalid '
159 raise util.Abort(_('%s hook is invalid '
152 '("%s" is not callable)') %
160 '("%s" is not callable)') %
153 (hname, funcname))
161 (hname, funcname))
154 try:
162 try:
155 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
163 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
156 except (KeyboardInterrupt, util.SignalInterrupt):
164 except (KeyboardInterrupt, util.SignalInterrupt):
157 raise
165 raise
158 except Exception, exc:
166 except Exception, exc:
159 if isinstance(exc, util.Abort):
167 if isinstance(exc, util.Abort):
160 self.ui.warn(_('error: %s hook failed: %s\n') %
168 self.ui.warn(_('error: %s hook failed: %s\n') %
161 (hname, exc.args[0]))
169 (hname, exc.args[0]))
162 else:
170 else:
163 self.ui.warn(_('error: %s hook raised an exception: '
171 self.ui.warn(_('error: %s hook raised an exception: '
164 '%s\n') % (hname, exc))
172 '%s\n') % (hname, exc))
165 if throw:
173 if throw:
166 raise
174 raise
167 self.ui.print_exc()
175 self.ui.print_exc()
168 return True
176 return True
169 if r:
177 if r:
170 if throw:
178 if throw:
171 raise util.Abort(_('%s hook failed') % hname)
179 raise util.Abort(_('%s hook failed') % hname)
172 self.ui.warn(_('warning: %s hook failed\n') % hname)
180 self.ui.warn(_('warning: %s hook failed\n') % hname)
173 return r
181 return r
174
182
175 def runhook(name, cmd):
183 def runhook(name, cmd):
176 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
184 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
177 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
185 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
178 r = util.system(cmd, environ=env, cwd=self.root)
186 r = util.system(cmd, environ=env, cwd=self.root)
179 if r:
187 if r:
180 desc, r = util.explain_exit(r)
188 desc, r = util.explain_exit(r)
181 if throw:
189 if throw:
182 raise util.Abort(_('%s hook %s') % (name, desc))
190 raise util.Abort(_('%s hook %s') % (name, desc))
183 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
191 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
184 return r
192 return r
185
193
186 r = False
194 r = False
187 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
195 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
188 if hname.split(".", 1)[0] == name and cmd]
196 if hname.split(".", 1)[0] == name and cmd]
189 hooks.sort()
197 hooks.sort()
190 for hname, cmd in hooks:
198 for hname, cmd in hooks:
191 if cmd.startswith('python:'):
199 if cmd.startswith('python:'):
192 r = callhook(hname, cmd[7:].strip()) or r
200 r = callhook(hname, cmd[7:].strip()) or r
193 else:
201 else:
194 r = runhook(hname, cmd) or r
202 r = runhook(hname, cmd) or r
195 return r
203 return r
196
204
197 tag_disallowed = ':\r\n'
205 tag_disallowed = ':\r\n'
198
206
199 def tag(self, name, node, message, local, user, date):
207 def tag(self, name, node, message, local, user, date):
200 '''tag a revision with a symbolic name.
208 '''tag a revision with a symbolic name.
201
209
202 if local is True, the tag is stored in a per-repository file.
210 if local is True, the tag is stored in a per-repository file.
203 otherwise, it is stored in the .hgtags file, and a new
211 otherwise, it is stored in the .hgtags file, and a new
204 changeset is committed with the change.
212 changeset is committed with the change.
205
213
206 keyword arguments:
214 keyword arguments:
207
215
208 local: whether to store tag in non-version-controlled file
216 local: whether to store tag in non-version-controlled file
209 (default False)
217 (default False)
210
218
211 message: commit message to use if committing
219 message: commit message to use if committing
212
220
213 user: name of user to use if committing
221 user: name of user to use if committing
214
222
215 date: date tuple to use if committing'''
223 date: date tuple to use if committing'''
216
224
217 for c in self.tag_disallowed:
225 for c in self.tag_disallowed:
218 if c in name:
226 if c in name:
219 raise util.Abort(_('%r cannot be used in a tag name') % c)
227 raise util.Abort(_('%r cannot be used in a tag name') % c)
220
228
221 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
229 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
222
230
223 if local:
231 if local:
224 # local tags are stored in the current charset
232 # local tags are stored in the current charset
225 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
233 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
226 self.hook('tag', node=hex(node), tag=name, local=local)
234 self.hook('tag', node=hex(node), tag=name, local=local)
227 return
235 return
228
236
229 for x in self.status()[:5]:
237 for x in self.status()[:5]:
230 if '.hgtags' in x:
238 if '.hgtags' in x:
231 raise util.Abort(_('working copy of .hgtags is changed '
239 raise util.Abort(_('working copy of .hgtags is changed '
232 '(please commit .hgtags manually)'))
240 '(please commit .hgtags manually)'))
233
241
234 # committed tags are stored in UTF-8
242 # committed tags are stored in UTF-8
235 line = '%s %s\n' % (hex(node), util.fromlocal(name))
243 line = '%s %s\n' % (hex(node), util.fromlocal(name))
236 self.wfile('.hgtags', 'ab').write(line)
244 self.wfile('.hgtags', 'ab').write(line)
237 if self.dirstate.state('.hgtags') == '?':
245 if self.dirstate.state('.hgtags') == '?':
238 self.add(['.hgtags'])
246 self.add(['.hgtags'])
239
247
240 self.commit(['.hgtags'], message, user, date)
248 self.commit(['.hgtags'], message, user, date)
241 self.hook('tag', node=hex(node), tag=name, local=local)
249 self.hook('tag', node=hex(node), tag=name, local=local)
242
250
243 def tags(self):
251 def tags(self):
244 '''return a mapping of tag to node'''
252 '''return a mapping of tag to node'''
245 if not self.tagscache:
253 if not self.tagscache:
246 self.tagscache = {}
254 self.tagscache = {}
247
255
248 def parsetag(line, context):
256 def parsetag(line, context):
249 if not line:
257 if not line:
250 return
258 return
251 s = l.split(" ", 1)
259 s = l.split(" ", 1)
252 if len(s) != 2:
260 if len(s) != 2:
253 self.ui.warn(_("%s: cannot parse entry\n") % context)
261 self.ui.warn(_("%s: cannot parse entry\n") % context)
254 return
262 return
255 node, key = s
263 node, key = s
256 key = util.tolocal(key.strip()) # stored in UTF-8
264 key = util.tolocal(key.strip()) # stored in UTF-8
257 try:
265 try:
258 bin_n = bin(node)
266 bin_n = bin(node)
259 except TypeError:
267 except TypeError:
260 self.ui.warn(_("%s: node '%s' is not well formed\n") %
268 self.ui.warn(_("%s: node '%s' is not well formed\n") %
261 (context, node))
269 (context, node))
262 return
270 return
263 if bin_n not in self.changelog.nodemap:
271 if bin_n not in self.changelog.nodemap:
264 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
272 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
265 (context, key))
273 (context, key))
266 return
274 return
267 self.tagscache[key] = bin_n
275 self.tagscache[key] = bin_n
268
276
269 # read the tags file from each head, ending with the tip,
277 # read the tags file from each head, ending with the tip,
270 # and add each tag found to the map, with "newer" ones
278 # and add each tag found to the map, with "newer" ones
271 # taking precedence
279 # taking precedence
272 f = None
280 f = None
273 for rev, node, fnode in self._hgtagsnodes():
281 for rev, node, fnode in self._hgtagsnodes():
274 f = (f and f.filectx(fnode) or
282 f = (f and f.filectx(fnode) or
275 self.filectx('.hgtags', fileid=fnode))
283 self.filectx('.hgtags', fileid=fnode))
276 count = 0
284 count = 0
277 for l in f.data().splitlines():
285 for l in f.data().splitlines():
278 count += 1
286 count += 1
279 parsetag(l, _("%s, line %d") % (str(f), count))
287 parsetag(l, _("%s, line %d") % (str(f), count))
280
288
281 try:
289 try:
282 f = self.opener("localtags")
290 f = self.opener("localtags")
283 count = 0
291 count = 0
284 for l in f:
292 for l in f:
285 # localtags are stored in the local character set
293 # localtags are stored in the local character set
286 # while the internal tag table is stored in UTF-8
294 # while the internal tag table is stored in UTF-8
287 l = util.fromlocal(l)
295 l = util.fromlocal(l)
288 count += 1
296 count += 1
289 parsetag(l, _("localtags, line %d") % count)
297 parsetag(l, _("localtags, line %d") % count)
290 except IOError:
298 except IOError:
291 pass
299 pass
292
300
293 self.tagscache['tip'] = self.changelog.tip()
301 self.tagscache['tip'] = self.changelog.tip()
294
302
295 return self.tagscache
303 return self.tagscache
296
304
297 def _hgtagsnodes(self):
305 def _hgtagsnodes(self):
298 heads = self.heads()
306 heads = self.heads()
299 heads.reverse()
307 heads.reverse()
300 last = {}
308 last = {}
301 ret = []
309 ret = []
302 for node in heads:
310 for node in heads:
303 c = self.changectx(node)
311 c = self.changectx(node)
304 rev = c.rev()
312 rev = c.rev()
305 try:
313 try:
306 fnode = c.filenode('.hgtags')
314 fnode = c.filenode('.hgtags')
307 except repo.LookupError:
315 except repo.LookupError:
308 continue
316 continue
309 ret.append((rev, node, fnode))
317 ret.append((rev, node, fnode))
310 if fnode in last:
318 if fnode in last:
311 ret[last[fnode]] = None
319 ret[last[fnode]] = None
312 last[fnode] = len(ret) - 1
320 last[fnode] = len(ret) - 1
313 return [item for item in ret if item]
321 return [item for item in ret if item]
314
322
315 def tagslist(self):
323 def tagslist(self):
316 '''return a list of tags ordered by revision'''
324 '''return a list of tags ordered by revision'''
317 l = []
325 l = []
318 for t, n in self.tags().items():
326 for t, n in self.tags().items():
319 try:
327 try:
320 r = self.changelog.rev(n)
328 r = self.changelog.rev(n)
321 except:
329 except:
322 r = -2 # sort to the beginning of the list if unknown
330 r = -2 # sort to the beginning of the list if unknown
323 l.append((r, t, n))
331 l.append((r, t, n))
324 l.sort()
332 l.sort()
325 return [(t, n) for r, t, n in l]
333 return [(t, n) for r, t, n in l]
326
334
327 def nodetags(self, node):
335 def nodetags(self, node):
328 '''return the tags associated with a node'''
336 '''return the tags associated with a node'''
329 if not self.nodetagscache:
337 if not self.nodetagscache:
330 self.nodetagscache = {}
338 self.nodetagscache = {}
331 for t, n in self.tags().items():
339 for t, n in self.tags().items():
332 self.nodetagscache.setdefault(n, []).append(t)
340 self.nodetagscache.setdefault(n, []).append(t)
333 return self.nodetagscache.get(node, [])
341 return self.nodetagscache.get(node, [])
334
342
335 def branchtags(self):
343 def branchtags(self):
336 if self.branchcache != None:
344 if self.branchcache != None:
337 return self.branchcache
345 return self.branchcache
338
346
339 self.branchcache = {} # avoid recursion in changectx
347 self.branchcache = {} # avoid recursion in changectx
340
348
341 partial, last, lrev = self._readbranchcache()
349 partial, last, lrev = self._readbranchcache()
342
350
343 tiprev = self.changelog.count() - 1
351 tiprev = self.changelog.count() - 1
344 if lrev != tiprev:
352 if lrev != tiprev:
345 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
346 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
347
355
348 # the branch cache is stored on disk as UTF-8, but in the local
356 # the branch cache is stored on disk as UTF-8, but in the local
349 # charset internally
357 # charset internally
350 for k, v in partial.items():
358 for k, v in partial.items():
351 self.branchcache[util.tolocal(k)] = v
359 self.branchcache[util.tolocal(k)] = v
352 return self.branchcache
360 return self.branchcache
353
361
354 def _readbranchcache(self):
362 def _readbranchcache(self):
355 partial = {}
363 partial = {}
356 try:
364 try:
357 f = self.opener("branches.cache")
365 f = self.opener("branches.cache")
358 lines = f.read().split('\n')
366 lines = f.read().split('\n')
359 f.close()
367 f.close()
360 last, lrev = lines.pop(0).rstrip().split(" ", 1)
368 last, lrev = lines.pop(0).rstrip().split(" ", 1)
361 last, lrev = bin(last), int(lrev)
369 last, lrev = bin(last), int(lrev)
362 if not (lrev < self.changelog.count() and
370 if not (lrev < self.changelog.count() and
363 self.changelog.node(lrev) == last): # sanity check
371 self.changelog.node(lrev) == last): # sanity check
364 # invalidate the cache
372 # invalidate the cache
365 raise ValueError('Invalid branch cache: unknown tip')
373 raise ValueError('Invalid branch cache: unknown tip')
366 for l in lines:
374 for l in lines:
367 if not l: continue
375 if not l: continue
368 node, label = l.rstrip().split(" ", 1)
376 node, label = l.rstrip().split(" ", 1)
369 partial[label] = bin(node)
377 partial[label] = bin(node)
370 except (KeyboardInterrupt, util.SignalInterrupt):
378 except (KeyboardInterrupt, util.SignalInterrupt):
371 raise
379 raise
372 except Exception, inst:
380 except Exception, inst:
373 if self.ui.debugflag:
381 if self.ui.debugflag:
374 self.ui.warn(str(inst), '\n')
382 self.ui.warn(str(inst), '\n')
375 partial, last, lrev = {}, nullid, nullrev
383 partial, last, lrev = {}, nullid, nullrev
376 return partial, last, lrev
384 return partial, last, lrev
377
385
378 def _writebranchcache(self, branches, tip, tiprev):
386 def _writebranchcache(self, branches, tip, tiprev):
379 try:
387 try:
380 f = self.opener("branches.cache", "w")
388 f = self.opener("branches.cache", "w")
381 f.write("%s %s\n" % (hex(tip), tiprev))
389 f.write("%s %s\n" % (hex(tip), tiprev))
382 for label, node in branches.iteritems():
390 for label, node in branches.iteritems():
383 f.write("%s %s\n" % (hex(node), label))
391 f.write("%s %s\n" % (hex(node), label))
384 except IOError:
392 except IOError:
385 pass
393 pass
386
394
387 def _updatebranchcache(self, partial, start, end):
395 def _updatebranchcache(self, partial, start, end):
388 for r in xrange(start, end):
396 for r in xrange(start, end):
389 c = self.changectx(r)
397 c = self.changectx(r)
390 b = c.branch()
398 b = c.branch()
391 if b:
399 if b:
392 partial[b] = c.node()
400 partial[b] = c.node()
393
401
394 def lookup(self, key):
402 def lookup(self, key):
395 if key == '.':
403 if key == '.':
396 key = self.dirstate.parents()[0]
404 key = self.dirstate.parents()[0]
397 if key == nullid:
405 if key == nullid:
398 raise repo.RepoError(_("no revision checked out"))
406 raise repo.RepoError(_("no revision checked out"))
399 elif key == 'null':
407 elif key == 'null':
400 return nullid
408 return nullid
401 n = self.changelog._match(key)
409 n = self.changelog._match(key)
402 if n:
410 if n:
403 return n
411 return n
404 if key in self.tags():
412 if key in self.tags():
405 return self.tags()[key]
413 return self.tags()[key]
406 if key in self.branchtags():
414 if key in self.branchtags():
407 return self.branchtags()[key]
415 return self.branchtags()[key]
408 n = self.changelog._partialmatch(key)
416 n = self.changelog._partialmatch(key)
409 if n:
417 if n:
410 return n
418 return n
411 raise repo.RepoError(_("unknown revision '%s'") % key)
419 raise repo.RepoError(_("unknown revision '%s'") % key)
412
420
413 def dev(self):
421 def dev(self):
414 return os.lstat(self.path).st_dev
422 return os.lstat(self.path).st_dev
415
423
416 def local(self):
424 def local(self):
417 return True
425 return True
418
426
419 def join(self, f):
427 def join(self, f):
420 return os.path.join(self.path, f)
428 return os.path.join(self.path, f)
421
429
422 def sjoin(self, f):
430 def sjoin(self, f):
431 f = self.encodefn(f)
423 return os.path.join(self.spath, f)
432 return os.path.join(self.spath, f)
424
433
425 def wjoin(self, f):
434 def wjoin(self, f):
426 return os.path.join(self.root, f)
435 return os.path.join(self.root, f)
427
436
428 def file(self, f):
437 def file(self, f):
429 if f[0] == '/':
438 if f[0] == '/':
430 f = f[1:]
439 f = f[1:]
431 return filelog.filelog(self.sopener, f, self.revlogversion)
440 return filelog.filelog(self.sopener, f, self.revlogversion)
432
441
433 def changectx(self, changeid=None):
442 def changectx(self, changeid=None):
434 return context.changectx(self, changeid)
443 return context.changectx(self, changeid)
435
444
436 def workingctx(self):
445 def workingctx(self):
437 return context.workingctx(self)
446 return context.workingctx(self)
438
447
439 def parents(self, changeid=None):
448 def parents(self, changeid=None):
440 '''
449 '''
441 get list of changectxs for parents of changeid or working directory
450 get list of changectxs for parents of changeid or working directory
442 '''
451 '''
443 if changeid is None:
452 if changeid is None:
444 pl = self.dirstate.parents()
453 pl = self.dirstate.parents()
445 else:
454 else:
446 n = self.changelog.lookup(changeid)
455 n = self.changelog.lookup(changeid)
447 pl = self.changelog.parents(n)
456 pl = self.changelog.parents(n)
448 if pl[1] == nullid:
457 if pl[1] == nullid:
449 return [self.changectx(pl[0])]
458 return [self.changectx(pl[0])]
450 return [self.changectx(pl[0]), self.changectx(pl[1])]
459 return [self.changectx(pl[0]), self.changectx(pl[1])]
451
460
452 def filectx(self, path, changeid=None, fileid=None):
461 def filectx(self, path, changeid=None, fileid=None):
453 """changeid can be a changeset revision, node, or tag.
462 """changeid can be a changeset revision, node, or tag.
454 fileid can be a file revision or node."""
463 fileid can be a file revision or node."""
455 return context.filectx(self, path, changeid, fileid)
464 return context.filectx(self, path, changeid, fileid)
456
465
457 def getcwd(self):
466 def getcwd(self):
458 return self.dirstate.getcwd()
467 return self.dirstate.getcwd()
459
468
460 def wfile(self, f, mode='r'):
469 def wfile(self, f, mode='r'):
461 return self.wopener(f, mode)
470 return self.wopener(f, mode)
462
471
463 def wread(self, filename):
472 def wread(self, filename):
464 if self.encodepats == None:
473 if self.encodepats == None:
465 l = []
474 l = []
466 for pat, cmd in self.ui.configitems("encode"):
475 for pat, cmd in self.ui.configitems("encode"):
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
476 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 l.append((mf, cmd))
477 l.append((mf, cmd))
469 self.encodepats = l
478 self.encodepats = l
470
479
471 data = self.wopener(filename, 'r').read()
480 data = self.wopener(filename, 'r').read()
472
481
473 for mf, cmd in self.encodepats:
482 for mf, cmd in self.encodepats:
474 if mf(filename):
483 if mf(filename):
475 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
484 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
476 data = util.filter(data, cmd)
485 data = util.filter(data, cmd)
477 break
486 break
478
487
479 return data
488 return data
480
489
481 def wwrite(self, filename, data, fd=None):
490 def wwrite(self, filename, data, fd=None):
482 if self.decodepats == None:
491 if self.decodepats == None:
483 l = []
492 l = []
484 for pat, cmd in self.ui.configitems("decode"):
493 for pat, cmd in self.ui.configitems("decode"):
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
494 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 l.append((mf, cmd))
495 l.append((mf, cmd))
487 self.decodepats = l
496 self.decodepats = l
488
497
489 for mf, cmd in self.decodepats:
498 for mf, cmd in self.decodepats:
490 if mf(filename):
499 if mf(filename):
491 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
500 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
492 data = util.filter(data, cmd)
501 data = util.filter(data, cmd)
493 break
502 break
494
503
495 if fd:
504 if fd:
496 return fd.write(data)
505 return fd.write(data)
497 return self.wopener(filename, 'w').write(data)
506 return self.wopener(filename, 'w').write(data)
498
507
499 def transaction(self):
508 def transaction(self):
500 tr = self.transhandle
509 tr = self.transhandle
501 if tr != None and tr.running():
510 if tr != None and tr.running():
502 return tr.nest()
511 return tr.nest()
503
512
504 # save dirstate for rollback
513 # save dirstate for rollback
505 try:
514 try:
506 ds = self.opener("dirstate").read()
515 ds = self.opener("dirstate").read()
507 except IOError:
516 except IOError:
508 ds = ""
517 ds = ""
509 self.opener("journal.dirstate", "w").write(ds)
518 self.opener("journal.dirstate", "w").write(ds)
510
519
511 renames = [(self.sjoin("journal"), self.sjoin("undo")),
520 renames = [(self.sjoin("journal"), self.sjoin("undo")),
512 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
521 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
513 tr = transaction.transaction(self.ui.warn, self.sopener,
522 tr = transaction.transaction(self.ui.warn, self.sopener,
514 self.sjoin("journal"),
523 self.sjoin("journal"),
515 aftertrans(renames))
524 aftertrans(renames))
516 self.transhandle = tr
525 self.transhandle = tr
517 return tr
526 return tr
518
527
519 def recover(self):
528 def recover(self):
520 l = self.lock()
529 l = self.lock()
521 if os.path.exists(self.sjoin("journal")):
530 if os.path.exists(self.sjoin("journal")):
522 self.ui.status(_("rolling back interrupted transaction\n"))
531 self.ui.status(_("rolling back interrupted transaction\n"))
523 transaction.rollback(self.sopener, self.sjoin("journal"))
532 transaction.rollback(self.sopener, self.sjoin("journal"))
524 self.reload()
533 self.reload()
525 return True
534 return True
526 else:
535 else:
527 self.ui.warn(_("no interrupted transaction available\n"))
536 self.ui.warn(_("no interrupted transaction available\n"))
528 return False
537 return False
529
538
530 def rollback(self, wlock=None):
539 def rollback(self, wlock=None):
531 if not wlock:
540 if not wlock:
532 wlock = self.wlock()
541 wlock = self.wlock()
533 l = self.lock()
542 l = self.lock()
534 if os.path.exists(self.sjoin("undo")):
543 if os.path.exists(self.sjoin("undo")):
535 self.ui.status(_("rolling back last transaction\n"))
544 self.ui.status(_("rolling back last transaction\n"))
536 transaction.rollback(self.sopener, self.sjoin("undo"))
545 transaction.rollback(self.sopener, self.sjoin("undo"))
537 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
546 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
538 self.reload()
547 self.reload()
539 self.wreload()
548 self.wreload()
540 else:
549 else:
541 self.ui.warn(_("no rollback information available\n"))
550 self.ui.warn(_("no rollback information available\n"))
542
551
543 def wreload(self):
552 def wreload(self):
544 self.dirstate.read()
553 self.dirstate.read()
545
554
546 def reload(self):
555 def reload(self):
547 self.changelog.load()
556 self.changelog.load()
548 self.manifest.load()
557 self.manifest.load()
549 self.tagscache = None
558 self.tagscache = None
550 self.nodetagscache = None
559 self.nodetagscache = None
551
560
552 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
561 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
553 desc=None):
562 desc=None):
554 try:
563 try:
555 l = lock.lock(lockname, 0, releasefn, desc=desc)
564 l = lock.lock(lockname, 0, releasefn, desc=desc)
556 except lock.LockHeld, inst:
565 except lock.LockHeld, inst:
557 if not wait:
566 if not wait:
558 raise
567 raise
559 self.ui.warn(_("waiting for lock on %s held by %r\n") %
568 self.ui.warn(_("waiting for lock on %s held by %r\n") %
560 (desc, inst.locker))
569 (desc, inst.locker))
561 # default to 600 seconds timeout
570 # default to 600 seconds timeout
562 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
571 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
563 releasefn, desc=desc)
572 releasefn, desc=desc)
564 if acquirefn:
573 if acquirefn:
565 acquirefn()
574 acquirefn()
566 return l
575 return l
567
576
568 def lock(self, wait=1):
577 def lock(self, wait=1):
569 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
578 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
570 desc=_('repository %s') % self.origroot)
579 desc=_('repository %s') % self.origroot)
571
580
572 def wlock(self, wait=1):
581 def wlock(self, wait=1):
573 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
582 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
574 self.wreload,
583 self.wreload,
575 desc=_('working directory of %s') % self.origroot)
584 desc=_('working directory of %s') % self.origroot)
576
585
577 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
586 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
578 """
587 """
579 commit an individual file as part of a larger transaction
588 commit an individual file as part of a larger transaction
580 """
589 """
581
590
582 t = self.wread(fn)
591 t = self.wread(fn)
583 fl = self.file(fn)
592 fl = self.file(fn)
584 fp1 = manifest1.get(fn, nullid)
593 fp1 = manifest1.get(fn, nullid)
585 fp2 = manifest2.get(fn, nullid)
594 fp2 = manifest2.get(fn, nullid)
586
595
        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

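        # overall flow: figure out which files to commit and which to
        # remove (from an explicit list or from the dirstate), check the
        # files in to their filelogs, build the new manifest, and then
        # record the changeset itself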
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

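        # file and manifest revisions written below point back at the
        # changeset being created; since it will be appended to the
        # changelog, its revision number is the current changelog length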
        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

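        # helpers: fcmp compares a working-directory file's contents with
        # the version recorded in a manifest; mfmatches builds a copy of
        # a revision's manifest restricted to files accepted by 'match'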
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)
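            # files in 'lookup' could not be decided from stat data
            # alone; their contents are compared for real below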

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
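            # an empty string in mf2 marks a working-directory file whose
            # hash is unknown (see the pseudo-manifest above), forcing a
            # real content comparison via fcmp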
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
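        # follow first parents from each node to the bottom of its linear
        # segment (a merge or the root), reporting the segment as
        # (head, root, root's first parent, root's second parent)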
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
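        # for each (top, bottom) pair, walk first parents from top toward
        # bottom, keeping nodes at exponentially growing distances
        # (1, 2, 4, ...); findincoming uses these samples to narrow down
        # where known history ends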
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but have no children in either.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
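        # take the store lock ourselves if the caller did not pass one
        # in, and release it when done in that case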
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

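        # rough plan: collect the manifest nodes and changed files
        # referenced by the missing changesets, prune those the recipient
        # must already have, then emit changelog, manifest, and filelog
        # groups in that order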
1436 self.hook('preoutgoing', throw=True, source=source)
1445 self.hook('preoutgoing', throw=True, source=source)
1437
1446
1438 # Set up some initial variables
1447 # Set up some initial variables
1439 # Make it easy to refer to self.changelog
1448 # Make it easy to refer to self.changelog
1440 cl = self.changelog
1449 cl = self.changelog
1441 # msng is short for missing - compute the list of changesets in this
1450 # msng is short for missing - compute the list of changesets in this
1442 # changegroup.
1451 # changegroup.
1443 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1452 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1444 self.changegroupinfo(msng_cl_lst)
1453 self.changegroupinfo(msng_cl_lst)
1445 # Some bases may turn out to be superfluous, and some heads may be
1454 # Some bases may turn out to be superfluous, and some heads may be
1446 # too. nodesbetween will return the minimal set of bases and heads
1455 # too. nodesbetween will return the minimal set of bases and heads
1447 # necessary to re-create the changegroup.
1456 # necessary to re-create the changegroup.
1448
1457
1449 # Known heads are the list of heads that it is assumed the recipient
1458 # Known heads are the list of heads that it is assumed the recipient
1450 # of this changegroup will know about.
1459 # of this changegroup will know about.
1451 knownheads = {}
1460 knownheads = {}
1452 # We assume that all parents of bases are known heads.
1461 # We assume that all parents of bases are known heads.
1453 for n in bases:
1462 for n in bases:
1454 for p in cl.parents(n):
1463 for p in cl.parents(n):
1455 if p != nullid:
1464 if p != nullid:
1456 knownheads[p] = 1
1465 knownheads[p] = 1
1457 knownheads = knownheads.keys()
1466 knownheads = knownheads.keys()
1458 if knownheads:
1467 if knownheads:
1459 # Now that we know what heads are known, we can compute which
1468 # Now that we know what heads are known, we can compute which
1460 # changesets are known. The recipient must know about all
1469 # changesets are known. The recipient must know about all
1461 # changesets required to reach the known heads from the null
1470 # changesets required to reach the known heads from the null
1462 # changeset.
1471 # changeset.
1463 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1472 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1464 junk = None
1473 junk = None
1465 # Transform the list into an ersatz set.
1474 # Transform the list into an ersatz set.
1466 has_cl_set = dict.fromkeys(has_cl_set)
1475 has_cl_set = dict.fromkeys(has_cl_set)
1467 else:
1476 else:
1468 # If there were no known heads, the recipient cannot be assumed to
1477 # If there were no known heads, the recipient cannot be assumed to
1469 # know about any changesets.
1478 # know about any changesets.
1470 has_cl_set = {}
1479 has_cl_set = {}
1471
1480
1472 # Make it easy to refer to self.manifest
1481 # Make it easy to refer to self.manifest
1473 mnfst = self.manifest
1482 mnfst = self.manifest
1474 # We don't know which manifests are missing yet
1483 # We don't know which manifests are missing yet
1475 msng_mnfst_set = {}
1484 msng_mnfst_set = {}
1476 # Nor do we know which filenodes are missing.
1485 # Nor do we know which filenodes are missing.
1477 msng_filenode_set = {}
1486 msng_filenode_set = {}
1478
1487
1479 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1488 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1480 junk = None
1489 junk = None
1481
1490
1482 # A changeset always belongs to itself, so the changenode lookup
1491 # A changeset always belongs to itself, so the changenode lookup
1483 # function for a changenode is identity.
1492 # function for a changenode is identity.
1484 def identity(x):
1493 def identity(x):
1485 return x
1494 return x
1486
1495
1487 # A function generating function. Sets up an environment for the
1496 # A function generating function. Sets up an environment for the
1488 # inner function.
1497 # inner function.
1489 def cmp_by_rev_func(revlog):
1498 def cmp_by_rev_func(revlog):
1490 # Compare two nodes by their revision number in the environment's
1499 # Compare two nodes by their revision number in the environment's
1491 # revision history. Since the revision number both represents the
1500 # revision history. Since the revision number both represents the
1492 # most efficient order to read the nodes in, and represents a
1501 # most efficient order to read the nodes in, and represents a
1493 # topological sorting of the nodes, this function is often useful.
1502 # topological sorting of the nodes, this function is often useful.
1494 def cmp_by_rev(a, b):
1503 def cmp_by_rev(a, b):
1495 return cmp(revlog.rev(a), revlog.rev(b))
1504 return cmp(revlog.rev(a), revlog.rev(b))
1496 return cmp_by_rev
1505 return cmp_by_rev
1497
1506
1498 # If we determine that a particular file or manifest node must be a
1507 # If we determine that a particular file or manifest node must be a
1499 # node that the recipient of the changegroup will already have, we can
1508 # node that the recipient of the changegroup will already have, we can
1500 # also assume the recipient will have all the parents. This function
1509 # also assume the recipient will have all the parents. This function
1501 # prunes them from the set of missing nodes.
1510 # prunes them from the set of missing nodes.
1502 def prune_parents(revlog, hasset, msngset):
1511 def prune_parents(revlog, hasset, msngset):
1503 haslst = hasset.keys()
1512 haslst = hasset.keys()
1504 haslst.sort(cmp_by_rev_func(revlog))
1513 haslst.sort(cmp_by_rev_func(revlog))
1505 for node in haslst:
1514 for node in haslst:
1506 parentlst = [p for p in revlog.parents(node) if p != nullid]
1515 parentlst = [p for p in revlog.parents(node) if p != nullid]
1507 while parentlst:
1516 while parentlst:
1508 n = parentlst.pop()
1517 n = parentlst.pop()
1509 if n not in hasset:
1518 if n not in hasset:
1510 hasset[n] = 1
1519 hasset[n] = 1
1511 p = [p for p in revlog.parents(n) if p != nullid]
1520 p = [p for p in revlog.parents(n) if p != nullid]
1512 parentlst.extend(p)
1521 parentlst.extend(p)
1513 for n in hasset:
1522 for n in hasset:
1514 msngset.pop(n, None)
1523 msngset.pop(n, None)
1515
1524
1516 # This is a function generating function used to set up an environment
1525 # This is a function generating function used to set up an environment
1517 # for the inner function to execute in.
1526 # for the inner function to execute in.
1518 def manifest_and_file_collector(changedfileset):
1527 def manifest_and_file_collector(changedfileset):
1519 # This is an information gathering function that gathers
1528 # This is an information gathering function that gathers
1520 # information from each changeset node that goes out as part of
1529 # information from each changeset node that goes out as part of
1521 # the changegroup. The information gathered is a list of which
1530 # the changegroup. The information gathered is a list of which
1522 # manifest nodes are potentially required (the recipient may
1531 # manifest nodes are potentially required (the recipient may
1523 # already have them) and total list of all files which were
1532 # already have them) and total list of all files which were
1524 # changed in any changeset in the changegroup.
1533 # changed in any changeset in the changegroup.
1525 #
1534 #
1526 # We also remember the first changenode we saw any manifest
1535 # We also remember the first changenode we saw any manifest
1527 # referenced by so we can later determine which changenode 'owns'
1536 # referenced by so we can later determine which changenode 'owns'
1528 # the manifest.
1537 # the manifest.
1529 def collect_manifests_and_files(clnode):
1538 def collect_manifests_and_files(clnode):
1530 c = cl.read(clnode)
1539 c = cl.read(clnode)
1531 for f in c[3]:
1540 for f in c[3]:
1532 # This is to make sure we only have one instance of each
1541 # This is to make sure we only have one instance of each
1533 # filename string for each filename.
1542 # filename string for each filename.
1534 changedfileset.setdefault(f, f)
1543 changedfileset.setdefault(f, f)
1535 msng_mnfst_set.setdefault(c[0], clnode)
1544 msng_mnfst_set.setdefault(c[0], clnode)
1536 return collect_manifests_and_files
1545 return collect_manifests_and_files
1537
1546
1538 # Figure out which manifest nodes (of the ones we think might be part
1547 # Figure out which manifest nodes (of the ones we think might be part
1539 # of the changegroup) the recipient must know about and remove them
1548 # of the changegroup) the recipient must know about and remove them
1540 # from the changegroup.
1549 # from the changegroup.
1541 def prune_manifests():
1550 def prune_manifests():
1542 has_mnfst_set = {}
1551 has_mnfst_set = {}
1543 for n in msng_mnfst_set:
1552 for n in msng_mnfst_set:
1544 # If a 'missing' manifest thinks it belongs to a changenode
1553 # If a 'missing' manifest thinks it belongs to a changenode
1545 # the recipient is assumed to have, obviously the recipient
1554 # the recipient is assumed to have, obviously the recipient
1546 # must have that manifest.
1555 # must have that manifest.
1547 linknode = cl.node(mnfst.linkrev(n))
1556 linknode = cl.node(mnfst.linkrev(n))
1548 if linknode in has_cl_set:
1557 if linknode in has_cl_set:
1549 has_mnfst_set[n] = 1
1558 has_mnfst_set[n] = 1
1550 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1559 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1551
1560
1552 # Use the information collected in collect_manifests_and_files to say
1561 # Use the information collected in collect_manifests_and_files to say
1553 # which changenode any manifestnode belongs to.
1562 # which changenode any manifestnode belongs to.
1554 def lookup_manifest_link(mnfstnode):
1563 def lookup_manifest_link(mnfstnode):
1555 return msng_mnfst_set[mnfstnode]
1564 return msng_mnfst_set[mnfstnode]
1556
1565
1557 # A function generating function that sets up the initial environment
1566 # A function generating function that sets up the initial environment
1558 # for the inner function.
1567 # for the inner function.
1559 def filenode_collector(changedfiles):
1568 def filenode_collector(changedfiles):
1560 next_rev = [0]
1569 next_rev = [0]
1561 # This gathers information from each manifestnode included in the
1570 # This gathers information from each manifestnode included in the
1562 # changegroup about which filenodes the manifest node references
1571 # changegroup about which filenodes the manifest node references
1563 # so we can include those in the changegroup too.
1572 # so we can include those in the changegroup too.
1564 #
1573 #
1565 # It also remembers which changenode each filenode belongs to. It
1574 # It also remembers which changenode each filenode belongs to. It
1566 # does this by assuming that a filenode belongs to the changenode
1575 # does this by assuming that a filenode belongs to the changenode
1567 # that the first manifest referencing it belongs to.
1576 # that the first manifest referencing it belongs to.
1568 def collect_msng_filenodes(mnfstnode):
1577 def collect_msng_filenodes(mnfstnode):
1569 r = mnfst.rev(mnfstnode)
1578 r = mnfst.rev(mnfstnode)
1570 if r == next_rev[0]:
1579 if r == next_rev[0]:
1571 # If the last rev we looked at was the one just previous,
1580 # If the last rev we looked at was the one just previous,
1572 # we only need to see a diff.
1581 # we only need to see a diff.
1573 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1582 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1574 # For each line in the delta
1583 # For each line in the delta
1575 for dline in delta.splitlines():
1584 for dline in delta.splitlines():
1576 # get the filename and filenode for that line
1585 # get the filename and filenode for that line
1577 f, fnode = dline.split('\0')
1586 f, fnode = dline.split('\0')
1578 fnode = bin(fnode[:40])
1587 fnode = bin(fnode[:40])
1579 f = changedfiles.get(f, None)
1588 f = changedfiles.get(f, None)
1580 # And if the file is in the list of files we care
1589 # And if the file is in the list of files we care
1581 # about.
1590 # about.
1582 if f is not None:
1591 if f is not None:
1583 # Get the changenode this manifest belongs to
1592 # Get the changenode this manifest belongs to
1584 clnode = msng_mnfst_set[mnfstnode]
1593 clnode = msng_mnfst_set[mnfstnode]
1585 # Create the set of filenodes for the file if
1594 # Create the set of filenodes for the file if
1586 # there isn't one already.
1595 # there isn't one already.
1587 ndset = msng_filenode_set.setdefault(f, {})
1596 ndset = msng_filenode_set.setdefault(f, {})
1588 # And set the filenode's changelog node to the
1597 # And set the filenode's changelog node to the
1589 # manifest's if it hasn't been set already.
1598 # manifest's if it hasn't been set already.
1590 ndset.setdefault(fnode, clnode)
1599 ndset.setdefault(fnode, clnode)
1591 else:
1600 else:
1592 # Otherwise we need a full manifest.
1601 # Otherwise we need a full manifest.
1593 m = mnfst.read(mnfstnode)
1602 m = mnfst.read(mnfstnode)
1594 # For every file we care about.
1603 # For every file we care about.
1595 for f in changedfiles:
1604 for f in changedfiles:
1596 fnode = m.get(f, None)
1605 fnode = m.get(f, None)
1597 # If it's in the manifest
1606 # If it's in the manifest
1598 if fnode is not None:
1607 if fnode is not None:
1599 # See comments above.
1608 # See comments above.
1600 clnode = msng_mnfst_set[mnfstnode]
1609 clnode = msng_mnfst_set[mnfstnode]
1601 ndset = msng_filenode_set.setdefault(f, {})
1610 ndset = msng_filenode_set.setdefault(f, {})
1602 ndset.setdefault(fnode, clnode)
1611 ndset.setdefault(fnode, clnode)
1603 # Remember the revision we hope to see next.
1612 # Remember the revision we hope to see next.
1604 next_rev[0] = r + 1
1613 next_rev[0] = r + 1
1605 return collect_msng_filenodes
1614 return collect_msng_filenodes
1606
1615
1607 # We have a list of filenodes we think we need for a file; let's
1616 # We have a list of filenodes we think we need for a file; let's
1608 # remove all those we know the recipient must have.
1617 # remove all those we know the recipient must have.
1609 def prune_filenodes(f, filerevlog):
1618 def prune_filenodes(f, filerevlog):
1610 msngset = msng_filenode_set[f]
1619 msngset = msng_filenode_set[f]
1611 hasset = {}
1620 hasset = {}
1612 # If a 'missing' filenode thinks it belongs to a changenode we
1621 # If a 'missing' filenode thinks it belongs to a changenode we
1613 # assume the recipient must have, then the recipient must have
1622 # assume the recipient must have, then the recipient must have
1614 # that filenode.
1623 # that filenode.
1615 for n in msngset:
1624 for n in msngset:
1616 clnode = cl.node(filerevlog.linkrev(n))
1625 clnode = cl.node(filerevlog.linkrev(n))
1617 if clnode in has_cl_set:
1626 if clnode in has_cl_set:
1618 hasset[n] = 1
1627 hasset[n] = 1
1619 prune_parents(filerevlog, hasset, msngset)
1628 prune_parents(filerevlog, hasset, msngset)
1620
1629
1621 # A function generating function that sets up a context for the
1630 # A function generating function that sets up a context for the
1622 # inner function.
1631 # inner function.
1623 def lookup_filenode_link_func(fname):
1632 def lookup_filenode_link_func(fname):
1624 msngset = msng_filenode_set[fname]
1633 msngset = msng_filenode_set[fname]
1625 # Lookup the changenode the filenode belongs to.
1634 # Lookup the changenode the filenode belongs to.
1626 def lookup_filenode_link(fnode):
1635 def lookup_filenode_link(fnode):
1627 return msngset[fnode]
1636 return msngset[fnode]
1628 return lookup_filenode_link
1637 return lookup_filenode_link
1629
1638
1630 # Now that we have all these utility functions to help out and
1639 # Now that we have all these utility functions to help out and
1631 # logically divide up the task, generate the group.
1640 # logically divide up the task, generate the group.
1632 def gengroup():
1641 def gengroup():
1633 # The set of changed files starts empty.
1642 # The set of changed files starts empty.
1634 changedfiles = {}
1643 changedfiles = {}
1635 # Create a changenode group generator that will call our functions
1644 # Create a changenode group generator that will call our functions
1636 # back to lookup the owning changenode and collect information.
1645 # back to lookup the owning changenode and collect information.
1637 group = cl.group(msng_cl_lst, identity,
1646 group = cl.group(msng_cl_lst, identity,
1638 manifest_and_file_collector(changedfiles))
1647 manifest_and_file_collector(changedfiles))
1639 for chnk in group:
1648 for chnk in group:
1640 yield chnk
1649 yield chnk
1641
1650
1642 # The list of manifests has been collected by the generator
1651 # The list of manifests has been collected by the generator
1643 # calling our functions back.
1652 # calling our functions back.
1644 prune_manifests()
1653 prune_manifests()
1645 msng_mnfst_lst = msng_mnfst_set.keys()
1654 msng_mnfst_lst = msng_mnfst_set.keys()
1646 # Sort the manifestnodes by revision number.
1655 # Sort the manifestnodes by revision number.
1647 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1656 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1648 # Create a generator for the manifestnodes that calls our lookup
1657 # Create a generator for the manifestnodes that calls our lookup
1649 # and data collection functions back.
1658 # and data collection functions back.
1650 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1659 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1651 filenode_collector(changedfiles))
1660 filenode_collector(changedfiles))
1652 for chnk in group:
1661 for chnk in group:
1653 yield chnk
1662 yield chnk
1654
1663
1655 # These are no longer needed, dereference and toss the memory for
1664 # These are no longer needed, dereference and toss the memory for
1656 # them.
1665 # them.
1657 msng_mnfst_lst = None
1666 msng_mnfst_lst = None
1658 msng_mnfst_set.clear()
1667 msng_mnfst_set.clear()
1659
1668
1660 changedfiles = changedfiles.keys()
1669 changedfiles = changedfiles.keys()
1661 changedfiles.sort()
1670 changedfiles.sort()
1662 # Go through all our files in order sorted by name.
1671 # Go through all our files in order sorted by name.
1663 for fname in changedfiles:
1672 for fname in changedfiles:
1664 filerevlog = self.file(fname)
1673 filerevlog = self.file(fname)
1665 # Toss out the filenodes that the recipient isn't really
1674 # Toss out the filenodes that the recipient isn't really
1666 # missing.
1675 # missing.
1667 if msng_filenode_set.has_key(fname):
1676 if msng_filenode_set.has_key(fname):
1668 prune_filenodes(fname, filerevlog)
1677 prune_filenodes(fname, filerevlog)
1669 msng_filenode_lst = msng_filenode_set[fname].keys()
1678 msng_filenode_lst = msng_filenode_set[fname].keys()
1670 else:
1679 else:
1671 msng_filenode_lst = []
1680 msng_filenode_lst = []
1672 # If any filenodes are left, generate the group for them,
1681 # If any filenodes are left, generate the group for them,
1673 # otherwise don't bother.
1682 # otherwise don't bother.
1674 if len(msng_filenode_lst) > 0:
1683 if len(msng_filenode_lst) > 0:
1675 yield changegroup.genchunk(fname)
1684 yield changegroup.genchunk(fname)
1676 # Sort the filenodes by their revision #
1685 # Sort the filenodes by their revision #
1677 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1686 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1678 # Create a group generator and only pass in a changenode
1687 # Create a group generator and only pass in a changenode
1679 # lookup function as we need to collect no information
1688 # lookup function as we need to collect no information
1680 # from filenodes.
1689 # from filenodes.
1681 group = filerevlog.group(msng_filenode_lst,
1690 group = filerevlog.group(msng_filenode_lst,
1682 lookup_filenode_link_func(fname))
1691 lookup_filenode_link_func(fname))
1683 for chnk in group:
1692 for chnk in group:
1684 yield chnk
1693 yield chnk
1685 if msng_filenode_set.has_key(fname):
1694 if msng_filenode_set.has_key(fname):
1686 # Don't need this anymore, toss it to free memory.
1695 # Don't need this anymore, toss it to free memory.
1687 del msng_filenode_set[fname]
1696 del msng_filenode_set[fname]
1688 # Signal that no more groups are left.
1697 # Signal that no more groups are left.
1689 yield changegroup.closechunk()
1698 yield changegroup.closechunk()
1690
1699
1691 if msng_cl_lst:
1700 if msng_cl_lst:
1692 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1701 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1693
1702
1694 return util.chunkbuffer(gengroup())
1703 return util.chunkbuffer(gengroup())
1695
1704
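Both changegroup generators above frame their output with changegroup.genchunk and changegroup.closechunk. As a hedged sketch from memory of that wire format (not the changegroup module's actual source): each chunk is a 4-byte big-endian length that includes the four length bytes themselves, and a zero length terminates the current group.

import struct

def genchunk_sketch(data):
    # the length prefix counts itself, so an empty payload encodes as 4
    return struct.pack(">l", len(data) + 4) + data

def closechunk_sketch():
    # a zero length marks the end of the current group
    return struct.pack(">l", 0)

assert genchunk_sketch("abc") == "\x00\x00\x00\x07abc"
assert closechunk_sketch() == "\x00\x00\x00\x00"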
1696 def changegroup(self, basenodes, source):
1705 def changegroup(self, basenodes, source):
1697 """Generate a changegroup of all nodes that we have that a recipient
1706 """Generate a changegroup of all nodes that we have that a recipient
1698 doesn't.
1707 doesn't.
1699
1708
1700 This is much easier than the previous function as we can assume that
1709 This is much easier than the previous function as we can assume that
1701 the recipient has any changenode we aren't sending them."""
1710 the recipient has any changenode we aren't sending them."""
1702
1711
1703 self.hook('preoutgoing', throw=True, source=source)
1712 self.hook('preoutgoing', throw=True, source=source)
1704
1713
1705 cl = self.changelog
1714 cl = self.changelog
1706 nodes = cl.nodesbetween(basenodes, None)[0]
1715 nodes = cl.nodesbetween(basenodes, None)[0]
1707 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1716 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1708 self.changegroupinfo(nodes)
1717 self.changegroupinfo(nodes)
1709
1718
1710 def identity(x):
1719 def identity(x):
1711 return x
1720 return x
1712
1721
1713 def gennodelst(revlog):
1722 def gennodelst(revlog):
1714 for r in xrange(0, revlog.count()):
1723 for r in xrange(0, revlog.count()):
1715 n = revlog.node(r)
1724 n = revlog.node(r)
1716 if revlog.linkrev(n) in revset:
1725 if revlog.linkrev(n) in revset:
1717 yield n
1726 yield n
1718
1727
1719 def changed_file_collector(changedfileset):
1728 def changed_file_collector(changedfileset):
1720 def collect_changed_files(clnode):
1729 def collect_changed_files(clnode):
1721 c = cl.read(clnode)
1730 c = cl.read(clnode)
1722 for fname in c[3]:
1731 for fname in c[3]:
1723 changedfileset[fname] = 1
1732 changedfileset[fname] = 1
1724 return collect_changed_files
1733 return collect_changed_files
1725
1734
1726 def lookuprevlink_func(revlog):
1735 def lookuprevlink_func(revlog):
1727 def lookuprevlink(n):
1736 def lookuprevlink(n):
1728 return cl.node(revlog.linkrev(n))
1737 return cl.node(revlog.linkrev(n))
1729 return lookuprevlink
1738 return lookuprevlink
1730
1739
1731 def gengroup():
1740 def gengroup():
1732 # construct a list of all changed files
1741 # construct a list of all changed files
1733 changedfiles = {}
1742 changedfiles = {}
1734
1743
1735 for chnk in cl.group(nodes, identity,
1744 for chnk in cl.group(nodes, identity,
1736 changed_file_collector(changedfiles)):
1745 changed_file_collector(changedfiles)):
1737 yield chnk
1746 yield chnk
1738 changedfiles = changedfiles.keys()
1747 changedfiles = changedfiles.keys()
1739 changedfiles.sort()
1748 changedfiles.sort()
1740
1749
1741 mnfst = self.manifest
1750 mnfst = self.manifest
1742 nodeiter = gennodelst(mnfst)
1751 nodeiter = gennodelst(mnfst)
1743 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1752 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1744 yield chnk
1753 yield chnk
1745
1754
1746 for fname in changedfiles:
1755 for fname in changedfiles:
1747 filerevlog = self.file(fname)
1756 filerevlog = self.file(fname)
1748 nodeiter = gennodelst(filerevlog)
1757 nodeiter = gennodelst(filerevlog)
1749 nodeiter = list(nodeiter)
1758 nodeiter = list(nodeiter)
1750 if nodeiter:
1759 if nodeiter:
1751 yield changegroup.genchunk(fname)
1760 yield changegroup.genchunk(fname)
1752 lookup = lookuprevlink_func(filerevlog)
1761 lookup = lookuprevlink_func(filerevlog)
1753 for chnk in filerevlog.group(nodeiter, lookup):
1762 for chnk in filerevlog.group(nodeiter, lookup):
1754 yield chnk
1763 yield chnk
1755
1764
1756 yield changegroup.closechunk()
1765 yield changegroup.closechunk()
1757
1766
1758 if nodes:
1767 if nodes:
1759 self.hook('outgoing', node=hex(nodes[0]), source=source)
1768 self.hook('outgoing', node=hex(nodes[0]), source=source)
1760
1769
1761 return util.chunkbuffer(gengroup())
1770 return util.chunkbuffer(gengroup())
1762
1771
1763 def addchangegroup(self, source, srctype, url):
1772 def addchangegroup(self, source, srctype, url):
1764 """add changegroup to repo.
1773 """add changegroup to repo.
1765
1774
1766 return values:
1775 return values:
1767 - nothing changed or no source: 0
1776 - nothing changed or no source: 0
1768 - more heads than before: 1+added heads (2..n)
1777 - more heads than before: 1+added heads (2..n)
1769 - fewer heads than before: -1-removed heads (-2..-n)
1778 - fewer heads than before: -1-removed heads (-2..-n)
1770 - number of heads stays the same: 1
1779 - number of heads stays the same: 1
1771 """
1780 """
1772 def csmap(x):
1781 def csmap(x):
1773 self.ui.debug(_("add changeset %s\n") % short(x))
1782 self.ui.debug(_("add changeset %s\n") % short(x))
1774 return cl.count()
1783 return cl.count()
1775
1784
1776 def revmap(x):
1785 def revmap(x):
1777 return cl.rev(x)
1786 return cl.rev(x)
1778
1787
1779 if not source:
1788 if not source:
1780 return 0
1789 return 0
1781
1790
1782 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1791 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1783
1792
1784 changesets = files = revisions = 0
1793 changesets = files = revisions = 0
1785
1794
1786 tr = self.transaction()
1795 tr = self.transaction()
1787
1796
1788 # write changelog data to temp files so concurrent readers will not see
1797 # write changelog data to temp files so concurrent readers will not see
1789 # an inconsistent view
1798 # an inconsistent view
1790 cl = None
1799 cl = None
1791 try:
1800 try:
1792 cl = appendfile.appendchangelog(self.sopener,
1801 cl = appendfile.appendchangelog(self.sopener,
1793 self.changelog.version)
1802 self.changelog.version)
1794
1803
1795 oldheads = len(cl.heads())
1804 oldheads = len(cl.heads())
1796
1805
1797 # pull off the changeset group
1806 # pull off the changeset group
1798 self.ui.status(_("adding changesets\n"))
1807 self.ui.status(_("adding changesets\n"))
1799 cor = cl.count() - 1
1808 cor = cl.count() - 1
1800 chunkiter = changegroup.chunkiter(source)
1809 chunkiter = changegroup.chunkiter(source)
1801 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1810 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1802 raise util.Abort(_("received changelog group is empty"))
1811 raise util.Abort(_("received changelog group is empty"))
1803 cnr = cl.count() - 1
1812 cnr = cl.count() - 1
1804 changesets = cnr - cor
1813 changesets = cnr - cor
1805
1814
1806 # pull off the manifest group
1815 # pull off the manifest group
1807 self.ui.status(_("adding manifests\n"))
1816 self.ui.status(_("adding manifests\n"))
1808 chunkiter = changegroup.chunkiter(source)
1817 chunkiter = changegroup.chunkiter(source)
1809 # no need to check for empty manifest group here:
1818 # no need to check for empty manifest group here:
1810 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1819 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1811 # no new manifest will be created and the manifest group will
1820 # no new manifest will be created and the manifest group will
1812 # be empty during the pull
1821 # be empty during the pull
1813 self.manifest.addgroup(chunkiter, revmap, tr)
1822 self.manifest.addgroup(chunkiter, revmap, tr)
1814
1823
1815 # process the files
1824 # process the files
1816 self.ui.status(_("adding file changes\n"))
1825 self.ui.status(_("adding file changes\n"))
1817 while 1:
1826 while 1:
1818 f = changegroup.getchunk(source)
1827 f = changegroup.getchunk(source)
1819 if not f:
1828 if not f:
1820 break
1829 break
1821 self.ui.debug(_("adding %s revisions\n") % f)
1830 self.ui.debug(_("adding %s revisions\n") % f)
1822 fl = self.file(f)
1831 fl = self.file(f)
1823 o = fl.count()
1832 o = fl.count()
1824 chunkiter = changegroup.chunkiter(source)
1833 chunkiter = changegroup.chunkiter(source)
1825 if fl.addgroup(chunkiter, revmap, tr) is None:
1834 if fl.addgroup(chunkiter, revmap, tr) is None:
1826 raise util.Abort(_("received file revlog group is empty"))
1835 raise util.Abort(_("received file revlog group is empty"))
1827 revisions += fl.count() - o
1836 revisions += fl.count() - o
1828 files += 1
1837 files += 1
1829
1838
1830 cl.writedata()
1839 cl.writedata()
1831 finally:
1840 finally:
1832 if cl:
1841 if cl:
1833 cl.cleanup()
1842 cl.cleanup()
1834
1843
1835 # make changelog see real files again
1844 # make changelog see real files again
1836 self.changelog = changelog.changelog(self.sopener,
1845 self.changelog = changelog.changelog(self.sopener,
1837 self.changelog.version)
1846 self.changelog.version)
1838 self.changelog.checkinlinesize(tr)
1847 self.changelog.checkinlinesize(tr)
1839
1848
1840 newheads = len(self.changelog.heads())
1849 newheads = len(self.changelog.heads())
1841 heads = ""
1850 heads = ""
1842 if oldheads and newheads != oldheads:
1851 if oldheads and newheads != oldheads:
1843 heads = _(" (%+d heads)") % (newheads - oldheads)
1852 heads = _(" (%+d heads)") % (newheads - oldheads)
1844
1853
1845 self.ui.status(_("added %d changesets"
1854 self.ui.status(_("added %d changesets"
1846 " with %d changes to %d files%s\n")
1855 " with %d changes to %d files%s\n")
1847 % (changesets, revisions, files, heads))
1856 % (changesets, revisions, files, heads))
1848
1857
1849 if changesets > 0:
1858 if changesets > 0:
1850 self.hook('pretxnchangegroup', throw=True,
1859 self.hook('pretxnchangegroup', throw=True,
1851 node=hex(self.changelog.node(cor+1)), source=srctype,
1860 node=hex(self.changelog.node(cor+1)), source=srctype,
1852 url=url)
1861 url=url)
1853
1862
1854 tr.close()
1863 tr.close()
1855
1864
1856 if changesets > 0:
1865 if changesets > 0:
1857 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1866 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1858 source=srctype, url=url)
1867 source=srctype, url=url)
1859
1868
1860 for i in xrange(cor + 1, cnr + 1):
1869 for i in xrange(cor + 1, cnr + 1):
1861 self.hook("incoming", node=hex(self.changelog.node(i)),
1870 self.hook("incoming", node=hex(self.changelog.node(i)),
1862 source=srctype, url=url)
1871 source=srctype, url=url)
1863
1872
1864 # never return 0 here:
1873 # never return 0 here:
1865 if newheads < oldheads:
1874 if newheads < oldheads:
1866 return newheads - oldheads - 1
1875 return newheads - oldheads - 1
1867 else:
1876 else:
1868 return newheads - oldheads + 1
1877 return newheads - oldheads + 1
1869
1878
1870
1879
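The return convention documented in the docstring above is easy to misread, so here is an illustrative decoder for it (the helper name is hypothetical; the arithmetic follows directly from the docstring):

def describe_result(r):
    # r == 0: nothing changed or there was no source
    if r == 0:
        return "no changes"
    if r > 0:
        return "%d head(s) added" % (r - 1)    # r = 1 + added heads
    return "%d head(s) removed" % (-r - 1)     # r = -1 - removed heads

assert describe_result(1) == "0 head(s) added"     # head count unchanged
assert describe_result(3) == "2 head(s) added"
assert describe_result(-2) == "1 head(s) removed"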
1871 def stream_in(self, remote):
1880 def stream_in(self, remote):
1872 fp = remote.stream_out()
1881 fp = remote.stream_out()
1873 l = fp.readline()
1882 l = fp.readline()
1874 try:
1883 try:
1875 resp = int(l)
1884 resp = int(l)
1876 except ValueError:
1885 except ValueError:
1877 raise util.UnexpectedOutput(
1886 raise util.UnexpectedOutput(
1878 _('Unexpected response from remote server:'), l)
1887 _('Unexpected response from remote server:'), l)
1879 if resp == 1:
1888 if resp == 1:
1880 raise util.Abort(_('operation forbidden by server'))
1889 raise util.Abort(_('operation forbidden by server'))
1881 elif resp == 2:
1890 elif resp == 2:
1882 raise util.Abort(_('locking the remote repository failed'))
1891 raise util.Abort(_('locking the remote repository failed'))
1883 elif resp != 0:
1892 elif resp != 0:
1884 raise util.Abort(_('the server sent an unknown error code'))
1893 raise util.Abort(_('the server sent an unknown error code'))
1885 self.ui.status(_('streaming all changes\n'))
1894 self.ui.status(_('streaming all changes\n'))
1886 l = fp.readline()
1895 l = fp.readline()
1887 try:
1896 try:
1888 total_files, total_bytes = map(int, l.split(' ', 1))
1897 total_files, total_bytes = map(int, l.split(' ', 1))
1889 except (ValueError, TypeError):
1898 except (ValueError, TypeError):
1890 raise util.UnexpectedOutput(
1899 raise util.UnexpectedOutput(
1891 _('Unexpected response from remote server:'), l)
1900 _('Unexpected response from remote server:'), l)
1892 self.ui.status(_('%d files to transfer, %s of data\n') %
1901 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 (total_files, util.bytecount(total_bytes)))
1902 (total_files, util.bytecount(total_bytes)))
1894 start = time.time()
1903 start = time.time()
1895 for i in xrange(total_files):
1904 for i in xrange(total_files):
1896 # XXX doesn't support '\n' or '\r' in filenames
1905 # XXX doesn't support '\n' or '\r' in filenames
1897 l = fp.readline()
1906 l = fp.readline()
1898 try:
1907 try:
1899 name, size = l.split('\0', 1)
1908 name, size = l.split('\0', 1)
1900 size = int(size)
1909 size = int(size)
1901 except (ValueError, TypeError):
1910 except (ValueError, TypeError):
1902 raise util.UnexpectedOutput(
1911 raise util.UnexpectedOutput(
1903 _('Unexpected response from remote server:'), l)
1912 _('Unexpected response from remote server:'), l)
1904 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1913 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 ofp = self.sopener(name, 'w')
1914 ofp = self.sopener(name, 'w')
1906 for chunk in util.filechunkiter(fp, limit=size):
1915 for chunk in util.filechunkiter(fp, limit=size):
1907 ofp.write(chunk)
1916 ofp.write(chunk)
1908 ofp.close()
1917 ofp.close()
1909 elapsed = time.time() - start
1918 elapsed = time.time() - start
1910 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1919 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1911 (util.bytecount(total_bytes), elapsed,
1920 (util.bytecount(total_bytes), elapsed,
1912 util.bytecount(total_bytes / elapsed)))
1921 util.bytecount(total_bytes / elapsed)))
1913 self.reload()
1922 self.reload()
1914 return len(self.heads()) + 1
1923 return len(self.heads()) + 1
1915
1924
1916 def clone(self, remote, heads=[], stream=False):
1925 def clone(self, remote, heads=[], stream=False):
1917 '''clone remote repository.
1926 '''clone remote repository.
1918
1927
1919 keyword arguments:
1928 keyword arguments:
1920 heads: list of revs to clone (forces use of pull)
1929 heads: list of revs to clone (forces use of pull)
1921 stream: use streaming clone if possible'''
1930 stream: use streaming clone if possible'''
1922
1931
1923 # now, all clients that can request uncompressed clones can
1932 # now, all clients that can request uncompressed clones can
1924 # read repo formats supported by all servers that can serve
1933 # read repo formats supported by all servers that can serve
1925 # them.
1934 # them.
1926
1935
1927 # if revlog format changes, client will have to check version
1936 # if revlog format changes, client will have to check version
1928 # and format flags on "stream" capability, and use
1937 # and format flags on "stream" capability, and use
1929 # uncompressed only if compatible.
1938 # uncompressed only if compatible.
1930
1939
1931 if stream and not heads and remote.capable('stream'):
1940 if stream and not heads and remote.capable('stream'):
1932 return self.stream_in(remote)
1941 return self.stream_in(remote)
1933 return self.pull(remote, heads)
1942 return self.pull(remote, heads)
1934
1943
1935 # used to avoid circular references so destructors work
1944 # used to avoid circular references so destructors work
1936 def aftertrans(files):
1945 def aftertrans(files):
1937 renamefiles = [tuple(t) for t in files]
1946 renamefiles = [tuple(t) for t in files]
1938 def a():
1947 def a():
1939 for src, dest in renamefiles:
1948 for src, dest in renamefiles:
1940 util.rename(src, dest)
1949 util.rename(src, dest)
1941 return a
1950 return a
1942
1951
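A short usage sketch of the closure pattern above, with hypothetical file names and assuming the aftertrans and util above are in scope: the transaction keeps only the returned callback, which captures the rename list but not the repository, so no reference cycle keeps destructors from running.

import os

open('journal.demo', 'w').close()              # stand-in for a journal file
cb = aftertrans([('journal.demo', 'undo.demo')])
cb()                                           # run when the transaction ends
assert os.path.exists('undo.demo')
os.remove('undo.demo')                         # clean up the demo file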
1943 def instance(ui, path, create):
1952 def instance(ui, path, create):
1944 return localrepository(ui, util.drop_scheme('file', path), create)
1953 return localrepository(ui, util.drop_scheme('file', path), create)
1945
1954
1946 def islocal(path):
1955 def islocal(path):
1947 return True
1956 return True
@@ -1,79 +1,86 @@
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from demandload import *
10 from demandload import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 demandload(globals(), "changelog filelog httprangereader")
12 demandload(globals(), "changelog filelog httprangereader")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 raise IOError(None, inst)
20 raise IOError(None, inst)
21 except urllib2.URLError, inst:
21 except urllib2.URLError, inst:
22 raise IOError(None, inst.reason[1])
22 raise IOError(None, inst.reason[1])
23
23
24 def opener(base):
24 def opener(base):
25 """return a function that opens files over http"""
25 """return a function that opens files over http"""
26 p = base
26 p = base
27 def o(path, mode="r"):
27 def o(path, mode="r"):
28 f = "/".join((p, urllib.quote(path)))
28 f = "/".join((p, urllib.quote(path)))
29 return rangereader(f)
29 return rangereader(f)
30 return o
30 return o
31
31
32 class statichttprepository(localrepo.localrepository):
32 class statichttprepository(localrepo.localrepository):
33 def __init__(self, ui, path):
33 def __init__(self, ui, path):
34 self._url = path
34 self._url = path
35 self.path = (path + "/.hg")
36 self.spath = self.path
37 self.ui = ui
35 self.ui = ui
38 self.revlogversion = 0
36 self.revlogversion = 0
37
38 self.path = (path + "/.hg")
39 self.opener = opener(self.path)
39 self.opener = opener(self.path)
40 # find requirements
40 # find requirements
41 try:
41 try:
42 requirements = self.opener("requires").read().splitlines()
42 requirements = self.opener("requires").read().splitlines()
43 except IOError:
43 except IOError:
44 requirements = []
44 requirements = []
45 # check them
45 # check them
46 for r in requirements:
46 for r in requirements:
47 if r not in self.supported:
47 if r not in self.supported:
48 raise repo.RepoError(_("requirement '%s' not supported") % r)
48 raise repo.RepoError(_("requirement '%s' not supported") % r)
49
49
50 # setup store
50 # setup store
51 if "store" in requirements:
52 self.encodefn = util.encodefilename
53 self.decodefn = util.decodefilename
54 self.spath = self.path + "/store"
55 else:
56 self.encodefn = lambda x: x
57 self.decodefn = lambda x: x
51 self.spath = self.path
58 self.spath = self.path
52 self.sopener = opener(self.spath)
59 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
53
60
54 self.manifest = manifest.manifest(self.sopener)
61 self.manifest = manifest.manifest(self.sopener)
55 self.changelog = changelog.changelog(self.sopener)
62 self.changelog = changelog.changelog(self.sopener)
56 self.tagscache = None
63 self.tagscache = None
57 self.nodetagscache = None
64 self.nodetagscache = None
58 self.encodepats = None
65 self.encodepats = None
59 self.decodepats = None
66 self.decodepats = None
60
67
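The 'store' requirement handled above maps repository file names to a case-safe on-disk form via util.encodefilename/util.decodefilename. The sketch below is an illustrative reconstruction of that kind of encoding (doubled underscores, uppercase letters folded to '_' plus lowercase), not a verbatim copy of util.py:

def encodefilename_sketch(s):
    out = []
    for c in s:
        if c == '_':
            out.append('__')              # escape the escape character
        elif 'A' <= c <= 'Z':
            out.append('_' + c.lower())   # fold case reversibly
        else:
            out.append(c)
    return ''.join(out)

assert encodefilename_sketch('data/FOO_BAR.i') == 'data/_f_o_o___b_a_r.i'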
61 def url(self):
68 def url(self):
62 return 'static-' + self._url
69 return 'static-' + self._url
63
70
64 def dev(self):
71 def dev(self):
65 return -1
72 return -1
66
73
67 def local(self):
74 def local(self):
68 return False
75 return False
69
76
70 def instance(ui, path, create):
77 def instance(ui, path, create):
71 if create:
78 if create:
72 raise util.Abort(_('cannot create new static-http repository'))
79 raise util.Abort(_('cannot create new static-http repository'))
73 if path.startswith('old-http:'):
80 if path.startswith('old-http:'):
74 ui.warn(_("old-http:// syntax is deprecated, "
81 ui.warn(_("old-http:// syntax is deprecated, "
75 "please use static-http:// instead\n"))
82 "please use static-http:// instead\n"))
76 path = path[4:]
83 path = path[4:]
77 else:
84 else:
78 path = path[7:]
85 path = path[7:]
79 return statichttprepository(ui, path)
86 return statichttprepository(ui, path)
@@ -1,95 +1,97 @@
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import demandload
8 from demandload import demandload
9 from i18n import gettext as _
9 from i18n import gettext as _
10 demandload(globals(), "os stat util lock")
10 demandload(globals(), "os stat util lock")
11
11
12 # if server supports streaming clone, it advertises "stream"
12 # if server supports streaming clone, it advertises "stream"
13 # capability with value that is version+flags of repo it is serving.
13 # capability with value that is version+flags of repo it is serving.
14 # client only streams if it can read that repo format.
14 # client only streams if it can read that repo format.
15
15
16 def walkrepo(root):
16 def walkrepo(root):
17 '''iterate over metadata files in repository.
17 '''iterate over metadata files in repository.
18 walk in natural (sorted) order.
18 walk in natural (sorted) order.
19 yields 2-tuples: name of .d or .i file, size of file.'''
19 yields 2-tuples: name of .d or .i file, size of file.'''
20
20
21 strip_count = len(root) + len(os.sep)
21 strip_count = len(root) + len(os.sep)
22 def walk(path, recurse):
22 def walk(path, recurse):
23 ents = os.listdir(path)
23 ents = os.listdir(path)
24 ents.sort()
24 ents.sort()
25 for e in ents:
25 for e in ents:
26 pe = os.path.join(path, e)
26 pe = os.path.join(path, e)
27 st = os.lstat(pe)
27 st = os.lstat(pe)
28 if stat.S_ISDIR(st.st_mode):
28 if stat.S_ISDIR(st.st_mode):
29 if recurse:
29 if recurse:
30 for x in walk(pe, True):
30 for x in walk(pe, True):
31 yield x
31 yield x
32 else:
32 else:
33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 continue
34 continue
35 sfx = e[-2:]
35 sfx = e[-2:]
36 if sfx in ('.d', '.i'):
36 if sfx in ('.d', '.i'):
37 yield pe[strip_count:], st.st_size
37 yield pe[strip_count:], st.st_size
38 # write file data first
38 # write file data first
39 for x in walk(os.path.join(root, 'data'), True):
39 for x in walk(os.path.join(root, 'data'), True):
40 yield x
40 yield x
41 # write manifest before changelog
41 # write manifest before changelog
42 meta = list(walk(root, False))
42 meta = list(walk(root, False))
43 meta.sort()
43 meta.sort()
44 meta.reverse()
44 meta.reverse()
45 for x in meta:
45 for x in meta:
46 yield x
46 yield x
47
47
48 # stream file format is simple.
48 # stream file format is simple.
49 #
49 #
50 # server writes out line that says how many files, how many total
50 # server writes out line that says how many files, how many total
51 # bytes. separator is ascii space, byte counts are strings.
51 # bytes. separator is ascii space, byte counts are strings.
52 #
52 #
53 # then for each file:
53 # then for each file:
54 #
54 #
55 # server writes out line that says file name, how many bytes in
55 # server writes out line that says file name, how many bytes in
56 # file. separator is ascii nul, byte count is string.
56 # file. separator is ascii nul, byte count is string.
57 #
57 #
58 # server writes out raw file data.
58 # server writes out raw file data.
59
59
60 def stream_out(repo, fileobj):
60 def stream_out(repo, fileobj):
61 '''stream out all metadata files in repository.
61 '''stream out all metadata files in repository.
62 writes to file-like object, must support write() and optional flush().'''
62 writes to file-like object, must support write() and optional flush().'''
63
63
64 if not repo.ui.configbool('server', 'uncompressed'):
64 if not repo.ui.configbool('server', 'uncompressed'):
65 fileobj.write('1\n')
65 fileobj.write('1\n')
66 return
66 return
67
67
68 # get a consistent snapshot of the repo: lock during the scan so the
68 # get a consistent snapshot of the repo: lock during the scan so the
69 # lock is not needed while we stream, and commits can happen.
69 # lock is not needed while we stream, and commits can happen.
70 try:
70 try:
71 repolock = repo.lock()
71 repolock = repo.lock()
72 except (lock.LockHeld, lock.LockUnavailable), inst:
72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 fileobj.write('2\n')
74 fileobj.write('2\n')
75 return
75 return
76
76
77 fileobj.write('0\n')
77 fileobj.write('0\n')
78 repo.ui.debug('scanning\n')
78 repo.ui.debug('scanning\n')
79 entries = []
79 entries = []
80 total_bytes = 0
80 total_bytes = 0
81 for name, size in walkrepo(repo.spath):
81 for name, size in walkrepo(repo.spath):
82 if repo.decodefn:
83 name = repo.decodefn(name)
82 entries.append((name, size))
84 entries.append((name, size))
83 total_bytes += size
85 total_bytes += size
84 repolock.release()
86 repolock.release()
85
87
86 repo.ui.debug('%d files, %d bytes to transfer\n' %
88 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 (len(entries), total_bytes))
89 (len(entries), total_bytes))
88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
90 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 for name, size in entries:
91 for name, size in entries:
90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
92 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 fileobj.write('%s\0%d\n' % (name, size))
93 fileobj.write('%s\0%d\n' % (name, size))
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
94 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 fileobj.write(chunk)
95 fileobj.write(chunk)
94 flush = getattr(fileobj, 'flush', None)
96 flush = getattr(fileobj, 'flush', None)
95 if flush: flush()
97 if flush: flush()
@@ -1,1210 +1,1214 @@
1 """
1 """
2 util.py - Mercurial utility functions and platform specific implementations
2 util.py - Mercurial utility functions and platform specific implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import gettext as _
15 from i18n import gettext as _
16 from demandload import *
16 from demandload import *
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
18 demandload(globals(), "os threading time calendar ConfigParser locale")
18 demandload(globals(), "os threading time calendar ConfigParser locale")
19
19
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding()
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding()
21 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
21 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
22
22
23 def tolocal(s):
23 def tolocal(s):
24 """
24 """
25 Convert a string from internal UTF-8 to local encoding
25 Convert a string from internal UTF-8 to local encoding
26
26
27 All internal strings should be UTF-8 but some repos before the
27 All internal strings should be UTF-8 but some repos before the
28 implementation of locale support may contain latin1 or possibly
28 implementation of locale support may contain latin1 or possibly
29 other character sets. We attempt to decode everything strictly
29 other character sets. We attempt to decode everything strictly
30 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
30 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
31 replace unknown characters.
31 replace unknown characters.
32 """
32 """
33 for e in "utf-8 latin1".split():
33 for e in "utf-8 latin1".split():
34 try:
34 try:
35 u = s.decode(e) # attempt strict decoding
35 u = s.decode(e) # attempt strict decoding
36 return u.encode(_encoding, "replace")
36 return u.encode(_encoding, "replace")
37 except UnicodeDecodeError:
37 except UnicodeDecodeError:
38 pass
38 pass
39 u = s.decode("utf-8", "replace") # last ditch
39 u = s.decode("utf-8", "replace") # last ditch
40 return u.encode(_encoding, "replace")
40 return u.encode(_encoding, "replace")
41
41
42 def fromlocal(s):
42 def fromlocal(s):
43 """
43 """
44 Convert a string from the local character encoding to UTF-8
44 Convert a string from the local character encoding to UTF-8
45
45
46 We attempt to decode strings using the encoding mode set by
46 We attempt to decode strings using the encoding mode set by
47 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
47 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
48 characters will cause an error message. Other modes include
48 characters will cause an error message. Other modes include
49 'replace', which replaces unknown characters with a special
49 'replace', which replaces unknown characters with a special
50 Unicode character, and 'ignore', which drops the character.
50 Unicode character, and 'ignore', which drops the character.
51 """
51 """
52 try:
52 try:
53 return s.decode(_encoding, _encodingmode).encode("utf-8")
53 return s.decode(_encoding, _encodingmode).encode("utf-8")
54 except UnicodeDecodeError, inst:
54 except UnicodeDecodeError, inst:
55 sub = s[max(0, inst.start-10):inst.start+10]
55 sub = s[max(0, inst.start-10):inst.start+10]
56 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
56 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
57
57
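To illustrate the two directions, here is the round trip these helpers perform, written out directly and assuming a hypothetical latin-1 local encoding:

local = '\xe9'                                     # 'e-acute' as latin-1 input
internal = local.decode('latin1').encode('utf-8')  # what fromlocal produces
assert internal == '\xc3\xa9'                      # the UTF-8 internal form
assert internal.decode('utf-8').encode('latin1') == local   # tolocal, roughly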
58 def locallen(s):
58 def locallen(s):
59 """Find the length in characters of a local string"""
59 """Find the length in characters of a local string"""
60 return len(s.decode(_encoding, "replace"))
60 return len(s.decode(_encoding, "replace"))
61
61
62 def localsub(s, a, b=None):
62 def localsub(s, a, b=None):
63 try:
63 try:
64 u = s.decode(_encoding, _encodingmode)
64 u = s.decode(_encoding, _encodingmode)
65 if b is not None:
65 if b is not None:
66 u = u[a:b]
66 u = u[a:b]
67 else:
67 else:
68 u = u[:a]
68 u = u[:a]
69 return u.encode(_encoding, _encodingmode)
69 return u.encode(_encoding, _encodingmode)
70 except UnicodeDecodeError, inst:
70 except UnicodeDecodeError, inst:
71 sub = s[max(0, inst.start-10):inst.start+10]
71 sub = s[max(0, inst.start-10):inst.start+10]
72 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
72 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
73
73
74 # used by parsedate
74 # used by parsedate
75 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
75 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
76 '%a %b %d %H:%M:%S %Y')
76 '%a %b %d %H:%M:%S %Y')
77
77
78 class SignalInterrupt(Exception):
78 class SignalInterrupt(Exception):
79 """Exception raised on SIGTERM and SIGHUP."""
79 """Exception raised on SIGTERM and SIGHUP."""
80
80
81 # like SafeConfigParser but with case-sensitive keys
81 # like SafeConfigParser but with case-sensitive keys
82 class configparser(ConfigParser.SafeConfigParser):
82 class configparser(ConfigParser.SafeConfigParser):
83 def optionxform(self, optionstr):
83 def optionxform(self, optionstr):
84 return optionstr
84 return optionstr
85
85
86 def cachefunc(func):
86 def cachefunc(func):
87 '''cache the result of function calls'''
87 '''cache the result of function calls'''
88 # XXX doesn't handle keyword args
88 # XXX doesn't handle keyword args
89 cache = {}
89 cache = {}
90 if func.func_code.co_argcount == 1:
90 if func.func_code.co_argcount == 1:
91 # we gain a small amount of time because
91 # we gain a small amount of time because
92 # we don't need to pack/unpack the list
92 # we don't need to pack/unpack the list
93 def f(arg):
93 def f(arg):
94 if arg not in cache:
94 if arg not in cache:
95 cache[arg] = func(arg)
95 cache[arg] = func(arg)
96 return cache[arg]
96 return cache[arg]
97 else:
97 else:
98 def f(*args):
98 def f(*args):
99 if args not in cache:
99 if args not in cache:
100 cache[args] = func(*args)
100 cache[args] = func(*args)
101 return cache[args]
101 return cache[args]
102
102
103 return f
103 return f
104
104
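Usage sketch for cachefunc, assuming the definition above is in scope: the wrapped body runs once per distinct argument and repeated calls hit the cache (the function below is illustrative only).

calls = []
def slow_double(x):
    calls.append(x)        # record real invocations
    return x * 2

fast_double = cachefunc(slow_double)
assert fast_double(21) == 42
assert fast_double(21) == 42
assert calls == [21]       # the second call was served from the cache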
105 def pipefilter(s, cmd):
105 def pipefilter(s, cmd):
106 '''filter string S through command CMD, returning its output'''
106 '''filter string S through command CMD, returning its output'''
107 (pout, pin) = popen2.popen2(cmd, -1, 'b')
107 (pout, pin) = popen2.popen2(cmd, -1, 'b')
108 def writer():
108 def writer():
109 try:
109 try:
110 pin.write(s)
110 pin.write(s)
111 pin.close()
111 pin.close()
112 except IOError, inst:
112 except IOError, inst:
113 if inst.errno != errno.EPIPE:
113 if inst.errno != errno.EPIPE:
114 raise
114 raise
115
115
116 # we should use select instead on UNIX, but this will work on most
116 # we should use select instead on UNIX, but this will work on most
117 # systems, including Windows
117 # systems, including Windows
118 w = threading.Thread(target=writer)
118 w = threading.Thread(target=writer)
119 w.start()
119 w.start()
120 f = pout.read()
120 f = pout.read()
121 pout.close()
121 pout.close()
122 w.join()
122 w.join()
123 return f
123 return f
124
124
125 def tempfilter(s, cmd):
125 def tempfilter(s, cmd):
126 '''filter string S through a pair of temporary files with CMD.
126 '''filter string S through a pair of temporary files with CMD.
127 CMD is used as a template to create the real command to be run,
127 CMD is used as a template to create the real command to be run,
128 with the strings INFILE and OUTFILE replaced by the real names of
128 with the strings INFILE and OUTFILE replaced by the real names of
129 the temporary files generated.'''
129 the temporary files generated.'''
130 inname, outname = None, None
130 inname, outname = None, None
131 try:
131 try:
132 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
132 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
133 fp = os.fdopen(infd, 'wb')
133 fp = os.fdopen(infd, 'wb')
134 fp.write(s)
134 fp.write(s)
135 fp.close()
135 fp.close()
136 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
136 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
137 os.close(outfd)
137 os.close(outfd)
138 cmd = cmd.replace('INFILE', inname)
138 cmd = cmd.replace('INFILE', inname)
139 cmd = cmd.replace('OUTFILE', outname)
139 cmd = cmd.replace('OUTFILE', outname)
140 code = os.system(cmd)
140 code = os.system(cmd)
141 if code: raise Abort(_("command '%s' failed: %s") %
141 if code: raise Abort(_("command '%s' failed: %s") %
142 (cmd, explain_exit(code)))
142 (cmd, explain_exit(code)))
143 return open(outname, 'rb').read()
143 return open(outname, 'rb').read()
144 finally:
144 finally:
145 try:
145 try:
146 if inname: os.unlink(inname)
146 if inname: os.unlink(inname)
147 except: pass
147 except: pass
148 try:
148 try:
149 if outname: os.unlink(outname)
149 if outname: os.unlink(outname)
150 except: pass
150 except: pass
151
151
152 filtertable = {
152 filtertable = {
153 'tempfile:': tempfilter,
153 'tempfile:': tempfilter,
154 'pipe:': pipefilter,
154 'pipe:': pipefilter,
155 }
155 }
156
156
157 def filter(s, cmd):
157 def filter(s, cmd):
158 "filter a string through a command that transforms its input to its output"
158 "filter a string through a command that transforms its input to its output"
159 for name, fn in filtertable.iteritems():
159 for name, fn in filtertable.iteritems():
160 if cmd.startswith(name):
160 if cmd.startswith(name):
161 return fn(s, cmd[len(name):].lstrip())
161 return fn(s, cmd[len(name):].lstrip())
162 return pipefilter(s, cmd)
162 return pipefilter(s, cmd)
163
163
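Usage sketch for the dispatch above, assuming the filter function is in scope: a recognized prefix selects the strategy, anything else falls through to pipefilter. The commands assume a POSIX tr binary and are illustrative.

assert filter('abc\n', 'pipe: tr a-z A-Z') == 'ABC\n'   # explicit pipe filter
assert filter('abc\n', 'tr a-z A-Z') == 'ABC\n'         # default: pipefilter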
164 def find_in_path(name, path, default=None):
164 def find_in_path(name, path, default=None):
165 '''find name in search path. path can be string (will be split
165 '''find name in search path. path can be string (will be split
166 with os.pathsep), or iterable thing that returns strings. if name
166 with os.pathsep), or iterable thing that returns strings. if name
167 found, return path to name. else return default.'''
167 found, return path to name. else return default.'''
168 if isinstance(path, str):
168 if isinstance(path, str):
169 path = path.split(os.pathsep)
169 path = path.split(os.pathsep)
170 for p in path:
170 for p in path:
171 p_name = os.path.join(p, name)
171 p_name = os.path.join(p, name)
172 if os.path.exists(p_name):
172 if os.path.exists(p_name):
173 return p_name
173 return p_name
174 return default
174 return default
175
175
176 def binary(s):
176 def binary(s):
177 """return true if a string is binary data using diff's heuristic"""
177 """return true if a string is binary data using diff's heuristic"""
178 if s and '\0' in s[:4096]:
178 if s and '\0' in s[:4096]:
179 return True
179 return True
180 return False
180 return False
181
181
182 def unique(g):
182 def unique(g):
183 """return the uniq elements of iterable g"""
183 """return the uniq elements of iterable g"""
184 seen = {}
184 seen = {}
185 l = []
185 l = []
186 for f in g:
186 for f in g:
187 if f not in seen:
187 if f not in seen:
188 seen[f] = 1
188 seen[f] = 1
189 l.append(f)
189 l.append(f)
190 return l
190 return l
191
191
192 class Abort(Exception):
192 class Abort(Exception):
193 """Raised if a command needs to print an error and exit."""
193 """Raised if a command needs to print an error and exit."""
194
194
195 class UnexpectedOutput(Abort):
195 class UnexpectedOutput(Abort):
196 """Raised to print an error with part of output and exit."""
196 """Raised to print an error with part of output and exit."""
197
197
198 def always(fn): return True
198 def always(fn): return True
199 def never(fn): return False
199 def never(fn): return False
200
200
201 def patkind(name, dflt_pat='glob'):
201 def patkind(name, dflt_pat='glob'):
202 """Split a string into an optional pattern kind prefix and the
202 """Split a string into an optional pattern kind prefix and the
203 actual pattern."""
203 actual pattern."""
204 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
204 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
205 if name.startswith(prefix + ':'): return name.split(':', 1)
205 if name.startswith(prefix + ':'): return name.split(':', 1)
206 return dflt_pat, name
206 return dflt_pat, name
207
207
208 def globre(pat, head='^', tail='$'):
208 def globre(pat, head='^', tail='$'):
209 "convert a glob pattern into a regexp"
209 "convert a glob pattern into a regexp"
210 i, n = 0, len(pat)
210 i, n = 0, len(pat)
211 res = ''
211 res = ''
212 group = False
212 group = False
213 def peek(): return i < n and pat[i]
213 def peek(): return i < n and pat[i]
214 while i < n:
214 while i < n:
215 c = pat[i]
215 c = pat[i]
216 i = i+1
216 i = i+1
217 if c == '*':
217 if c == '*':
218 if peek() == '*':
218 if peek() == '*':
219 i += 1
219 i += 1
220 res += '.*'
220 res += '.*'
221 else:
221 else:
222 res += '[^/]*'
222 res += '[^/]*'
223 elif c == '?':
223 elif c == '?':
224 res += '.'
224 res += '.'
225 elif c == '[':
225 elif c == '[':
226 j = i
226 j = i
227 if j < n and pat[j] in '!]':
227 if j < n and pat[j] in '!]':
228 j += 1
228 j += 1
229 while j < n and pat[j] != ']':
229 while j < n and pat[j] != ']':
230 j += 1
230 j += 1
231 if j >= n:
231 if j >= n:
232 res += '\\['
232 res += '\\['
233 else:
233 else:
234 stuff = pat[i:j].replace('\\','\\\\')
234 stuff = pat[i:j].replace('\\','\\\\')
235 i = j + 1
235 i = j + 1
236 if stuff[0] == '!':
236 if stuff[0] == '!':
237 stuff = '^' + stuff[1:]
237 stuff = '^' + stuff[1:]
238 elif stuff[0] == '^':
238 elif stuff[0] == '^':
239 stuff = '\\' + stuff
239 stuff = '\\' + stuff
240 res = '%s[%s]' % (res, stuff)
240 res = '%s[%s]' % (res, stuff)
241 elif c == '{':
241 elif c == '{':
242 group = True
242 group = True
243 res += '(?:'
243 res += '(?:'
244 elif c == '}' and group:
244 elif c == '}' and group:
245 res += ')'
245 res += ')'
246 group = False
246 group = False
247 elif c == ',' and group:
247 elif c == ',' and group:
248 res += '|'
248 res += '|'
249 elif c == '\\':
249 elif c == '\\':
250 p = peek()
250 p = peek()
251 if p:
251 if p:
252 i += 1
252 i += 1
253 res += re.escape(p)
253 res += re.escape(p)
254 else:
254 else:
255 res += re.escape(c)
255 res += re.escape(c)
256 else:
256 else:
257 res += re.escape(c)
257 res += re.escape(c)
258 return head + res + tail
258 return head + res + tail
259
259
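Some illustrative expansions of the translation above, assuming the globre function is in scope (re.escape in this era escapes every non-alphanumeric character, hence the '\.'):

assert globre('*.py') == '^[^/]*\\.py$'   # '*' stops at path separators
assert globre('foo?') == '^foo.$'         # '?' matches a single character
assert globre('{a,b}') == '^(?:a|b)$'     # alternation becomes a group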
260 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
260 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
261
261
262 def pathto(n1, n2):
262 def pathto(n1, n2):
263 '''return the relative path from one place to another.
263 '''return the relative path from one place to another.
264 n1 should use os.sep to separate directories
264 n1 should use os.sep to separate directories
265 n2 should use "/" to separate directories
265 n2 should use "/" to separate directories
266 returns an os.sep-separated path.
266 returns an os.sep-separated path.
267 '''
267 '''
268 if not n1: return localpath(n2)
268 if not n1: return localpath(n2)
269 a, b = n1.split(os.sep), n2.split('/')
269 a, b = n1.split(os.sep), n2.split('/')
270 a.reverse()
270 a.reverse()
271 b.reverse()
271 b.reverse()
272 while a and b and a[-1] == b[-1]:
272 while a and b and a[-1] == b[-1]:
273 a.pop()
273 a.pop()
274 b.pop()
274 b.pop()
275 b.reverse()
275 b.reverse()
276 return os.sep.join((['..'] * len(a)) + b)
276 return os.sep.join((['..'] * len(a)) + b)
277
277
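Two worked examples of the computation above, assuming the pathto function is in scope and a POSIX os.sep of '/' (illustrative only):

assert pathto('a/b', 'a/c') == '../c'   # climb out of b, descend into c
assert pathto('a', 'a/b/c') == 'b/c'    # pure descent needs no '..'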
278 def canonpath(root, cwd, myname):
278 def canonpath(root, cwd, myname):
279 """return the canonical path of myname, given cwd and root"""
279 """return the canonical path of myname, given cwd and root"""
280 if root == os.sep:
280 if root == os.sep:
281 rootsep = os.sep
281 rootsep = os.sep
282 elif root.endswith(os.sep):
282 elif root.endswith(os.sep):
283 rootsep = root
283 rootsep = root
284 else:
284 else:
285 rootsep = root + os.sep
285 rootsep = root + os.sep
286 name = myname
286 name = myname
287 if not os.path.isabs(name):
287 if not os.path.isabs(name):
288 name = os.path.join(root, cwd, name)
288 name = os.path.join(root, cwd, name)
289 name = os.path.normpath(name)
289 name = os.path.normpath(name)
290 if name != rootsep and name.startswith(rootsep):
290 if name != rootsep and name.startswith(rootsep):
291 name = name[len(rootsep):]
291 name = name[len(rootsep):]
292 audit_path(name)
292 audit_path(name)
293 return pconvert(name)
293 return pconvert(name)
294 elif name == root:
294 elif name == root:
295 return ''
295 return ''
296 else:
296 else:
297 # Determine whether `name' is in the hierarchy at or beneath `root',
297 # Determine whether `name' is in the hierarchy at or beneath `root',
298 # by iterating name=dirname(name) until that causes no change (can't
298 # by iterating name=dirname(name) until that causes no change (can't
299 # check name == '/', because that doesn't work on windows). For each
299 # check name == '/', because that doesn't work on windows). For each
300 # `name', compare dev/inode numbers. If they match, the list `rel'
300 # `name', compare dev/inode numbers. If they match, the list `rel'
301 # holds the reversed list of components making up the relative file
301 # holds the reversed list of components making up the relative file
302 # name we want.
302 # name we want.
303 root_st = os.stat(root)
303 root_st = os.stat(root)
304 rel = []
304 rel = []
305 while True:
305 while True:
306 try:
306 try:
307 name_st = os.stat(name)
307 name_st = os.stat(name)
308 except OSError:
308 except OSError:
309 break
309 break
310 if samestat(name_st, root_st):
310 if samestat(name_st, root_st):
311 rel.reverse()
311 rel.reverse()
312 name = os.path.join(*rel)
312 name = os.path.join(*rel)
313 audit_path(name)
313 audit_path(name)
314 return pconvert(name)
314 return pconvert(name)
315 dirname, basename = os.path.split(name)
315 dirname, basename = os.path.split(name)
316 rel.append(basename)
316 rel.append(basename)
317 if dirname == name:
317 if dirname == name:
318 break
318 break
319 name = dirname
319 name = dirname
320
320
321 raise Abort('%s not under root' % myname)
321 raise Abort('%s not under root' % myname)
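
# worked example (POSIX): with root='/repo' and cwd='src',
#     canonpath('/repo', 'src', '../lib/x.c')
#       join -> '/repo/src/../lib/x.c', normpath -> '/repo/lib/x.c',
#       strip rootsep, audit -> 'lib/x.c'
# an absolute name outside the root falls through to the stat loop and
# aborts:
#     canonpath('/repo', '', '/etc/passwd')
#       -> Abort('/etc/passwd not under root')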
322
322
323 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
323 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
324 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
324 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
325
325
326 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
326 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
327 if os.name == 'nt':
327 if os.name == 'nt':
328 dflt_pat = 'glob'
328 dflt_pat = 'glob'
329 else:
329 else:
330 dflt_pat = 'relpath'
330 dflt_pat = 'relpath'
331 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
331 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
332
332
333 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
333 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
334 """build a function to match a set of file patterns
334 """build a function to match a set of file patterns
335
335
336 arguments:
336 arguments:
337 canonroot - the canonical root of the tree you're matching against
337 canonroot - the canonical root of the tree you're matching against
338 cwd - the current working directory, if relevant
338 cwd - the current working directory, if relevant
339 names - patterns to find
339 names - patterns to find
340 inc - patterns to include
340 inc - patterns to include
341 exc - patterns to exclude
341 exc - patterns to exclude
342 head - a regex to prepend to patterns to control whether a match is rooted
342 head - a regex to prepend to patterns to control whether a match is rooted
343
343
344 a pattern is one of:
344 a pattern is one of:
345 'glob:<rooted glob>'
345 'glob:<rooted glob>'
346 're:<rooted regexp>'
346 're:<rooted regexp>'
347 'path:<rooted path>'
347 'path:<rooted path>'
348 'relglob:<relative glob>'
348 'relglob:<relative glob>'
349 'relpath:<relative path>'
349 'relpath:<relative path>'
350 'relre:<relative regexp>'
350 'relre:<relative regexp>'
351 '<rooted path or regexp>'
351 '<rooted path or regexp>'
352
352
353 returns:
353 returns:
354 a 3-tuple containing
354 a 3-tuple containing
355 - list of explicit non-pattern names passed in
355 - list of explicit non-pattern names passed in
356 - a bool match(filename) function
356 - a bool match(filename) function
357 - a bool indicating if any patterns were passed in
357 - a bool indicating if any patterns were passed in
358
358
359 todo:
359 todo:
360 make head regex a rooted bool
360 make head regex a rooted bool
361 """
361 """
362
362
363 def contains_glob(name):
363 def contains_glob(name):
364 for c in name:
364 for c in name:
365 if c in _globchars: return True
365 if c in _globchars: return True
366 return False
366 return False
367
367
368 def regex(kind, name, tail):
368 def regex(kind, name, tail):
369 '''convert a pattern into a regular expression'''
369 '''convert a pattern into a regular expression'''
370 if kind == 're':
370 if kind == 're':
371 return name
371 return name
372 elif kind == 'path':
372 elif kind == 'path':
373 return '^' + re.escape(name) + '(?:/|$)'
373 return '^' + re.escape(name) + '(?:/|$)'
374 elif kind == 'relglob':
374 elif kind == 'relglob':
375 return head + globre(name, '(?:|.*/)', tail)
375 return head + globre(name, '(?:|.*/)', tail)
376 elif kind == 'relpath':
376 elif kind == 'relpath':
377 return head + re.escape(name) + tail
377 return head + re.escape(name) + tail
378 elif kind == 'relre':
378 elif kind == 'relre':
379 if name.startswith('^'):
379 if name.startswith('^'):
380 return name
380 return name
381 return '.*' + name
381 return '.*' + name
382 return head + globre(name, '', tail)
382 return head + globre(name, '', tail)
383
383
384 def matchfn(pats, tail):
384 def matchfn(pats, tail):
385 """build a matching function from a set of patterns"""
385 """build a matching function from a set of patterns"""
386 if not pats:
386 if not pats:
387 return
387 return
388 matches = []
388 matches = []
389 for k, p in pats:
389 for k, p in pats:
390 try:
390 try:
391 pat = '(?:%s)' % regex(k, p, tail)
391 pat = '(?:%s)' % regex(k, p, tail)
392 matches.append(re.compile(pat).match)
392 matches.append(re.compile(pat).match)
393 except re.error:
393 except re.error:
394 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
394 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
395 else: raise Abort("invalid pattern (%s): %s" % (k, p))
395 else: raise Abort("invalid pattern (%s): %s" % (k, p))
396
396
397 def buildfn(text):
397 def buildfn(text):
398 for m in matches:
398 for m in matches:
399 r = m(text)
399 r = m(text)
400 if r:
400 if r:
401 return r
401 return r
402
402
403 return buildfn
403 return buildfn
404
404
405 def globprefix(pat):
405 def globprefix(pat):
406 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
406 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
407 root = []
407 root = []
408 for p in pat.split(os.sep):
408 for p in pat.split(os.sep):
409 if contains_glob(p): break
409 if contains_glob(p): break
410 root.append(p)
410 root.append(p)
411 return '/'.join(root)
411 return '/'.join(root)
412
412
413 pats = []
413 pats = []
414 files = []
414 files = []
415 roots = []
415 roots = []
416 for kind, name in [patkind(p, dflt_pat) for p in names]:
416 for kind, name in [patkind(p, dflt_pat) for p in names]:
417 if kind in ('glob', 'relpath'):
417 if kind in ('glob', 'relpath'):
418 name = canonpath(canonroot, cwd, name)
418 name = canonpath(canonroot, cwd, name)
419 if name == '':
419 if name == '':
420 kind, name = 'glob', '**'
420 kind, name = 'glob', '**'
421 if kind in ('glob', 'path', 're'):
421 if kind in ('glob', 'path', 're'):
422 pats.append((kind, name))
422 pats.append((kind, name))
423 if kind == 'glob':
423 if kind == 'glob':
424 root = globprefix(name)
424 root = globprefix(name)
425 if root: roots.append(root)
425 if root: roots.append(root)
426 elif kind == 'relpath':
426 elif kind == 'relpath':
427 files.append((kind, name))
427 files.append((kind, name))
428 roots.append(name)
428 roots.append(name)
429
429
430 patmatch = matchfn(pats, '$') or always
430 patmatch = matchfn(pats, '$') or always
431 filematch = matchfn(files, '(?:/|$)') or always
431 filematch = matchfn(files, '(?:/|$)') or always
432 incmatch = always
432 incmatch = always
433 if inc:
433 if inc:
434 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
434 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
435 incmatch = matchfn(inckinds, '(?:/|$)')
435 incmatch = matchfn(inckinds, '(?:/|$)')
436 excmatch = lambda fn: False
436 excmatch = lambda fn: False
437 if exc:
437 if exc:
438 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
438 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
439 excmatch = matchfn(exckinds, '(?:/|$)')
439 excmatch = matchfn(exckinds, '(?:/|$)')
440
440
441 return (roots,
441 return (roots,
442 lambda fn: (incmatch(fn) and not excmatch(fn) and
442 lambda fn: (incmatch(fn) and not excmatch(fn) and
443 (fn.endswith('/') or
443 (fn.endswith('/') or
444 (not pats and not files) or
444 (not pats and not files) or
445 (pats and patmatch(fn)) or
445 (pats and patmatch(fn)) or
446 (files and filematch(fn)))),
446 (files and filematch(fn)))),
447 (inc or exc or (pats and pats != [('glob', '**')])) and True)
447 (inc or exc or (pats and pats != [('glob', '**')])) and True)
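
# usage sketch (assuming the '*' branch of globre above keeps a match within
# one path segment, as the call sites here rely on):
#     roots, match, anypats = matcher('/repo', '', ['glob:src/*.py'], [], [])
#     roots   == ['src']        # non-glob prefix, used to prune tree walks
#     match('src/a.py')         # -> truthy
#     match('src/sub/a.py')     # -> falsy ('*' does not cross '/')
#     anypats == True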
448
448
449 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
449 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
450 '''enhanced shell command execution.
450 '''enhanced shell command execution.
451 run with environment maybe modified, maybe in different dir.
451 run with environment maybe modified, maybe in different dir.
452
452
453 if command fails and onerr is None, return status. if ui object,
453 if command fails and onerr is None, return status. if ui object,
454 print error message and return status, else raise onerr object as
454 print error message and return status, else raise onerr object as
455 exception.'''
455 exception.'''
456 def py2shell(val):
456 def py2shell(val):
457 'convert python object into string that is useful to shell'
457 'convert python object into string that is useful to shell'
458 if val in (None, False):
458 if val in (None, False):
459 return '0'
459 return '0'
460 if val == True:
460 if val == True:
461 return '1'
461 return '1'
462 return str(val)
462 return str(val)
463 oldenv = {}
463 oldenv = {}
464 for k in environ:
464 for k in environ:
465 oldenv[k] = os.environ.get(k)
465 oldenv[k] = os.environ.get(k)
466 if cwd is not None:
466 if cwd is not None:
467 oldcwd = os.getcwd()
467 oldcwd = os.getcwd()
468 try:
468 try:
469 for k, v in environ.iteritems():
469 for k, v in environ.iteritems():
470 os.environ[k] = py2shell(v)
470 os.environ[k] = py2shell(v)
471 if cwd is not None and oldcwd != cwd:
471 if cwd is not None and oldcwd != cwd:
472 os.chdir(cwd)
472 os.chdir(cwd)
473 rc = os.system(cmd)
473 rc = os.system(cmd)
474 if rc and onerr:
474 if rc and onerr:
475 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
475 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
476 explain_exit(rc)[0])
476 explain_exit(rc)[0])
477 if errprefix:
477 if errprefix:
478 errmsg = '%s: %s' % (errprefix, errmsg)
478 errmsg = '%s: %s' % (errprefix, errmsg)
479 try:
479 try:
480 onerr.warn(errmsg + '\n')
480 onerr.warn(errmsg + '\n')
481 except AttributeError:
481 except AttributeError:
482 raise onerr(errmsg)
482 raise onerr(errmsg)
483 return rc
483 return rc
484 finally:
484 finally:
485 for k, v in oldenv.iteritems():
485 for k, v in oldenv.iteritems():
486 if v is None:
486 if v is None:
487 del os.environ[k]
487 del os.environ[k]
488 else:
488 else:
489 os.environ[k] = v
489 os.environ[k] = v
490 if cwd is not None and oldcwd != cwd:
490 if cwd is not None and oldcwd != cwd:
491 os.chdir(oldcwd)
491 os.chdir(oldcwd)
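
# usage sketch: py2shell makes hook environments shell-friendly
# (None/False -> '0', True -> '1'):
#     system('test "$HG_MERGE" = 1', environ={'HG_MERGE': True})   # -> 0
#     system('exit 3', onerr=Abort, errprefix='hook')
#       # Abort has no .warn, so this raises
#       # Abort('hook: exit exited with status 3')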
492
492
493 def rename(src, dst):
493 def rename(src, dst):
494 """forcibly rename a file"""
494 """forcibly rename a file"""
495 try:
495 try:
496 os.rename(src, dst)
496 os.rename(src, dst)
497 except OSError, err:
497 except OSError, err:
498 # on windows, rename to existing file is not allowed, so we
498 # on windows, rename to existing file is not allowed, so we
499 # must delete destination first. but if file is open, unlink
499 # must delete destination first. but if file is open, unlink
500 # schedules it for delete but does not delete it. rename
500 # schedules it for delete but does not delete it. rename
501 # happens immediately even for open files, so we create
501 # happens immediately even for open files, so we create
502 # temporary file, delete it, rename destination to that name,
502 # temporary file, delete it, rename destination to that name,
503 # then delete that. then rename is safe to do.
503 # then delete that. then rename is safe to do.
504 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
504 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
505 os.close(fd)
505 os.close(fd)
506 os.unlink(temp)
506 os.unlink(temp)
507 os.rename(dst, temp)
507 os.rename(dst, temp)
508 os.unlink(temp)
508 os.unlink(temp)
509 os.rename(src, dst)
509 os.rename(src, dst)
510
510
511 def unlink(f):
511 def unlink(f):
512 """unlink and remove the directory if it is empty"""
512 """unlink and remove the directory if it is empty"""
513 os.unlink(f)
513 os.unlink(f)
514 # try removing directories that might now be empty
514 # try removing directories that might now be empty
515 try:
515 try:
516 os.removedirs(os.path.dirname(f))
516 os.removedirs(os.path.dirname(f))
517 except OSError:
517 except OSError:
518 pass
518 pass
519
519
520 def copyfile(src, dest):
520 def copyfile(src, dest):
521 "copy a file, preserving mode"
521 "copy a file, preserving mode"
522 try:
522 try:
523 shutil.copyfile(src, dest)
523 shutil.copyfile(src, dest)
524 shutil.copymode(src, dest)
524 shutil.copymode(src, dest)
525 except shutil.Error, inst:
525 except shutil.Error, inst:
526 raise Abort(str(inst))
526 raise Abort(str(inst))
527
527
528 def copyfiles(src, dst, hardlink=None):
528 def copyfiles(src, dst, hardlink=None):
529 """Copy a directory tree using hardlinks if possible"""
529 """Copy a directory tree using hardlinks if possible"""
530
530
531 if hardlink is None:
531 if hardlink is None:
532 hardlink = (os.stat(src).st_dev ==
532 hardlink = (os.stat(src).st_dev ==
533 os.stat(os.path.dirname(dst)).st_dev)
533 os.stat(os.path.dirname(dst)).st_dev)
534
534
535 if os.path.isdir(src):
535 if os.path.isdir(src):
536 os.mkdir(dst)
536 os.mkdir(dst)
537 for name in os.listdir(src):
537 for name in os.listdir(src):
538 srcname = os.path.join(src, name)
538 srcname = os.path.join(src, name)
539 dstname = os.path.join(dst, name)
539 dstname = os.path.join(dst, name)
540 copyfiles(srcname, dstname, hardlink)
540 copyfiles(srcname, dstname, hardlink)
541 else:
541 else:
542 if hardlink:
542 if hardlink:
543 try:
543 try:
544 os_link(src, dst)
544 os_link(src, dst)
545 except (IOError, OSError):
545 except (IOError, OSError):
546 hardlink = False
546 hardlink = False
547 shutil.copy(src, dst)
547 shutil.copy(src, dst)
548 else:
548 else:
549 shutil.copy(src, dst)
549 shutil.copy(src, dst)
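
# usage sketch: a local clone can hardlink revlogs when source and target
# share a device; the st_dev comparison picks that automatically, and a
# failed os_link falls back to a plain copy of that file:
#     copyfiles('orig/.hg/store', 'clone/.hg/store')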
550
550
551 def audit_path(path):
551 def audit_path(path):
552 """Abort if path contains dangerous components"""
552 """Abort if path contains dangerous components"""
553 parts = os.path.normcase(path).split(os.sep)
553 parts = os.path.normcase(path).split(os.sep)
554 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
554 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
555 or os.pardir in parts):
555 or os.pardir in parts):
556 raise Abort(_("path contains illegal component: %s\n") % path)
556 raise Abort(_("path contains illegal component: %s\n") % path)
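
# behaviour sketch (POSIX separators):
#     audit_path('data/foo.i')    # ok
#     audit_path('.hg/hgrc')      # Abort: first component is '.hg'
#     audit_path('../escape')     # Abort: os.pardir in parts
#     audit_path('/etc/passwd')   # Abort: leading '' component (absolute)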
557
557
558 def _makelock_file(info, pathname):
558 def _makelock_file(info, pathname):
559 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
559 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
560 os.write(ld, info)
560 os.write(ld, info)
561 os.close(ld)
561 os.close(ld)
562
562
563 def _readlock_file(pathname):
563 def _readlock_file(pathname):
564 return posixfile(pathname).read()
564 return posixfile(pathname).read()
565
565
566 def nlinks(pathname):
566 def nlinks(pathname):
567 """Return number of hardlinks for the given file."""
567 """Return number of hardlinks for the given file."""
568 return os.lstat(pathname).st_nlink
568 return os.lstat(pathname).st_nlink
569
569
570 if hasattr(os, 'link'):
570 if hasattr(os, 'link'):
571 os_link = os.link
571 os_link = os.link
572 else:
572 else:
573 def os_link(src, dst):
573 def os_link(src, dst):
574 raise OSError(0, _("Hardlinks not supported"))
574 raise OSError(0, _("Hardlinks not supported"))
575
575
576 def fstat(fp):
576 def fstat(fp):
577 '''stat file object that may not have fileno method.'''
577 '''stat file object that may not have fileno method.'''
578 try:
578 try:
579 return os.fstat(fp.fileno())
579 return os.fstat(fp.fileno())
580 except AttributeError:
580 except AttributeError:
581 return os.stat(fp.name)
581 return os.stat(fp.name)
582
582
583 posixfile = file
583 posixfile = file
584
584
585 def is_win_9x():
585 def is_win_9x():
586 '''return true if run on windows 95, 98 or me.'''
586 '''return true if run on windows 95, 98 or me.'''
587 try:
587 try:
588 return sys.getwindowsversion()[3] == 1
588 return sys.getwindowsversion()[3] == 1
589 except AttributeError:
589 except AttributeError:
590 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
590 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
591
591
592 getuser_fallback = None
592 getuser_fallback = None
593
593
594 def getuser():
594 def getuser():
595 '''return name of current user'''
595 '''return name of current user'''
596 try:
596 try:
597 return getpass.getuser()
597 return getpass.getuser()
598 except ImportError:
598 except ImportError:
599 # import of pwd will fail on windows - try fallback
599 # import of pwd will fail on windows - try fallback
600 if getuser_fallback:
600 if getuser_fallback:
601 return getuser_fallback()
601 return getuser_fallback()
602 # raised if win32api not available
602 # raised if win32api not available
603 raise Abort(_('user name not available - set USERNAME '
603 raise Abort(_('user name not available - set USERNAME '
604 'environment variable'))
604 'environment variable'))
605
605
606 def username(uid=None):
606 def username(uid=None):
607 """Return the name of the user with the given uid.
607 """Return the name of the user with the given uid.
608
608
609 If uid is None, return the name of the current user."""
609 If uid is None, return the name of the current user."""
610 try:
610 try:
611 import pwd
611 import pwd
612 if uid is None:
612 if uid is None:
613 uid = os.getuid()
613 uid = os.getuid()
614 try:
614 try:
615 return pwd.getpwuid(uid)[0]
615 return pwd.getpwuid(uid)[0]
616 except KeyError:
616 except KeyError:
617 return str(uid)
617 return str(uid)
618 except ImportError:
618 except ImportError:
619 return None
619 return None
620
620
621 def groupname(gid=None):
621 def groupname(gid=None):
622 """Return the name of the group with the given gid.
622 """Return the name of the group with the given gid.
623
623
624 If gid is None, return the name of the current group."""
624 If gid is None, return the name of the current group."""
625 try:
625 try:
626 import grp
626 import grp
627 if gid is None:
627 if gid is None:
628 gid = os.getgid()
628 gid = os.getgid()
629 try:
629 try:
630 return grp.getgrgid(gid)[0]
630 return grp.getgrgid(gid)[0]
631 except KeyError:
631 except KeyError:
632 return str(gid)
632 return str(gid)
633 except ImportError:
633 except ImportError:
634 return None
634 return None
635
635
636 # File system features
636 # File system features
637
637
638 def checkfolding(path):
638 def checkfolding(path):
639 """
639 """
640 Check whether the given path is on a case-sensitive filesystem
640 Check whether the given path is on a case-sensitive filesystem
641
641
642 Requires a path (like /foo/.hg) ending with a foldable final
642 Requires a path (like /foo/.hg) ending with a foldable final
643 directory component.
643 directory component.
644 """
644 """
645 s1 = os.stat(path)
645 s1 = os.stat(path)
646 d, b = os.path.split(path)
646 d, b = os.path.split(path)
647 p2 = os.path.join(d, b.upper())
647 p2 = os.path.join(d, b.upper())
648 if path == p2:
648 if path == p2:
649 p2 = os.path.join(d, b.lower())
649 p2 = os.path.join(d, b.lower())
650 try:
650 try:
651 s2 = os.stat(p2)
651 s2 = os.stat(p2)
652 if s2 == s1:
652 if s2 == s1:
653 return False
653 return False
654 return True
654 return True
655 except:
655 except:
656 return True
656 return True
657
657
658 # Platform specific variants
658 # Platform specific variants
659 if os.name == 'nt':
659 if os.name == 'nt':
660 demandload(globals(), "msvcrt")
660 demandload(globals(), "msvcrt")
661 nulldev = 'NUL:'
661 nulldev = 'NUL:'
662
662
663 class winstdout:
663 class winstdout:
664 '''stdout on windows misbehaves if sent through a pipe'''
664 '''stdout on windows misbehaves if sent through a pipe'''
665
665
666 def __init__(self, fp):
666 def __init__(self, fp):
667 self.fp = fp
667 self.fp = fp
668
668
669 def __getattr__(self, key):
669 def __getattr__(self, key):
670 return getattr(self.fp, key)
670 return getattr(self.fp, key)
671
671
672 def close(self):
672 def close(self):
673 try:
673 try:
674 self.fp.close()
674 self.fp.close()
675 except: pass
675 except: pass
676
676
677 def write(self, s):
677 def write(self, s):
678 try:
678 try:
679 return self.fp.write(s)
679 return self.fp.write(s)
680 except IOError, inst:
680 except IOError, inst:
681 if inst.errno != 0: raise
681 if inst.errno != 0: raise
682 self.close()
682 self.close()
683 raise IOError(errno.EPIPE, 'Broken pipe')
683 raise IOError(errno.EPIPE, 'Broken pipe')
684
684
685 sys.stdout = winstdout(sys.stdout)
685 sys.stdout = winstdout(sys.stdout)
686
686
687 def system_rcpath():
687 def system_rcpath():
688 try:
688 try:
689 return system_rcpath_win32()
689 return system_rcpath_win32()
690 except:
690 except:
691 return [r'c:\mercurial\mercurial.ini']
691 return [r'c:\mercurial\mercurial.ini']
692
692
693 def os_rcpath():
693 def os_rcpath():
694 '''return default os-specific hgrc search path'''
694 '''return default os-specific hgrc search path'''
695 path = system_rcpath()
695 path = system_rcpath()
696 path.append(user_rcpath())
696 path.append(user_rcpath())
697 userprofile = os.environ.get('USERPROFILE')
697 userprofile = os.environ.get('USERPROFILE')
698 if userprofile:
698 if userprofile:
699 path.append(os.path.join(userprofile, 'mercurial.ini'))
699 path.append(os.path.join(userprofile, 'mercurial.ini'))
700 return path
700 return path
701
701
702 def user_rcpath():
702 def user_rcpath():
703 '''return os-specific hgrc search path to the user dir'''
703 '''return os-specific hgrc search path to the user dir'''
704 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
704 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
705
705
706 def parse_patch_output(output_line):
706 def parse_patch_output(output_line):
707 """parses the output produced by patch and returns the file name"""
707 """parses the output produced by patch and returns the file name"""
708 pf = output_line[14:]
708 pf = output_line[14:]
709 if pf[0] == '`':
709 if pf[0] == '`':
710 pf = pf[1:-1] # Remove the quotes
710 pf = pf[1:-1] # Remove the quotes
711 return pf
711 return pf
712
712
713 def testpid(pid):
713 def testpid(pid):
714 '''return False if pid dead, True if running or not known'''
714 '''return False if pid dead, True if running or not known'''
715 return True
715 return True
716
716
717 def is_exec(f, last):
717 def is_exec(f, last):
718 return last
718 return last
719
719
720 def set_exec(f, mode):
720 def set_exec(f, mode):
721 pass
721 pass
722
722
723 def set_binary(fd):
723 def set_binary(fd):
724 msvcrt.setmode(fd.fileno(), os.O_BINARY)
724 msvcrt.setmode(fd.fileno(), os.O_BINARY)
725
725
726 def pconvert(path):
726 def pconvert(path):
727 return path.replace("\\", "/")
727 return path.replace("\\", "/")
728
728
729 def localpath(path):
729 def localpath(path):
730 return path.replace('/', '\\')
730 return path.replace('/', '\\')
731
731
732 def normpath(path):
732 def normpath(path):
733 return pconvert(os.path.normpath(path))
733 return pconvert(os.path.normpath(path))
734
734
735 makelock = _makelock_file
735 makelock = _makelock_file
736 readlock = _readlock_file
736 readlock = _readlock_file
737
737
738 def samestat(s1, s2):
738 def samestat(s1, s2):
739 return False
739 return False
740
740
741 def shellquote(s):
741 def shellquote(s):
742 return '"%s"' % s.replace('"', '\\"')
742 return '"%s"' % s.replace('"', '\\"')
743
743
744 def explain_exit(code):
744 def explain_exit(code):
745 return _("exited with status %d") % code, code
745 return _("exited with status %d") % code, code
746
746
747 # if you change this stub into a real check, please try to implement the
747 # if you change this stub into a real check, please try to implement the
748 # username and groupname functions above, too.
748 # username and groupname functions above, too.
749 def isowner(fp, st=None):
749 def isowner(fp, st=None):
750 return True
750 return True
751
751
752 try:
752 try:
753 # override functions with win32 versions if possible
753 # override functions with win32 versions if possible
754 from util_win32 import *
754 from util_win32 import *
755 if not is_win_9x():
755 if not is_win_9x():
756 posixfile = posixfile_nt
756 posixfile = posixfile_nt
757 except ImportError:
757 except ImportError:
758 pass
758 pass
759
759
760 else:
760 else:
761 nulldev = '/dev/null'
761 nulldev = '/dev/null'
762
762
763 def rcfiles(path):
763 def rcfiles(path):
764 rcs = [os.path.join(path, 'hgrc')]
764 rcs = [os.path.join(path, 'hgrc')]
765 rcdir = os.path.join(path, 'hgrc.d')
765 rcdir = os.path.join(path, 'hgrc.d')
766 try:
766 try:
767 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
767 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
768 if f.endswith(".rc")])
768 if f.endswith(".rc")])
769 except OSError:
769 except OSError:
770 pass
770 pass
771 return rcs
771 return rcs
772
772
773 def os_rcpath():
773 def os_rcpath():
774 '''return default os-specific hgrc search path'''
774 '''return default os-specific hgrc search path'''
775 path = []
775 path = []
776 # old mod_python does not set sys.argv
776 # old mod_python does not set sys.argv
777 if len(getattr(sys, 'argv', [])) > 0:
777 if len(getattr(sys, 'argv', [])) > 0:
778 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
778 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
779 '/../etc/mercurial'))
779 '/../etc/mercurial'))
780 path.extend(rcfiles('/etc/mercurial'))
780 path.extend(rcfiles('/etc/mercurial'))
781 path.append(os.path.expanduser('~/.hgrc'))
781 path.append(os.path.expanduser('~/.hgrc'))
782 path = [os.path.normpath(f) for f in path]
782 path = [os.path.normpath(f) for f in path]
783 return path
783 return path
784
784
785 def parse_patch_output(output_line):
785 def parse_patch_output(output_line):
786 """parses the output produced by patch and returns the file name"""
786 """parses the output produced by patch and returns the file name"""
787 pf = output_line[14:]
787 pf = output_line[14:]
788 if pf.startswith("'") and pf.endswith("'") and " " in pf:
788 if pf.startswith("'") and pf.endswith("'") and " " in pf:
789 pf = pf[1:-1] # Remove the quotes
789 pf = pf[1:-1] # Remove the quotes
790 return pf
790 return pf
791
791
792 def is_exec(f, last):
792 def is_exec(f, last):
793 """check whether a file is executable"""
793 """check whether a file is executable"""
794 return (os.lstat(f).st_mode & 0100 != 0)
794 return (os.lstat(f).st_mode & 0100 != 0)
795
795
796 def set_exec(f, mode):
796 def set_exec(f, mode):
797 s = os.lstat(f).st_mode
797 s = os.lstat(f).st_mode
798 if (s & 0100 != 0) == mode:
798 if (s & 0100 != 0) == mode:
799 return
799 return
800 if mode:
800 if mode:
801 # Turn on +x for every +r bit when making a file executable
801 # Turn on +x for every +r bit when making a file executable
802 # and obey umask.
802 # and obey umask.
803 umask = os.umask(0)
803 umask = os.umask(0)
804 os.umask(umask)
804 os.umask(umask)
805 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
805 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
806 else:
806 else:
807 os.chmod(f, s & 0666)
807 os.chmod(f, s & 0666)
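
# worked example: each readable bit gains an execute bit, filtered by umask:
#     s = 0644, umask 022  ->  s | ((s & 0444) >> 2 & ~022)  ->  0755
#     s = 0600, umask 077  ->  0700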
808
808
809 def set_binary(fd):
809 def set_binary(fd):
810 pass
810 pass
811
811
812 def pconvert(path):
812 def pconvert(path):
813 return path
813 return path
814
814
815 def localpath(path):
815 def localpath(path):
816 return path
816 return path
817
817
818 normpath = os.path.normpath
818 normpath = os.path.normpath
819 samestat = os.path.samestat
819 samestat = os.path.samestat
820
820
821 def makelock(info, pathname):
821 def makelock(info, pathname):
822 try:
822 try:
823 os.symlink(info, pathname)
823 os.symlink(info, pathname)
824 except OSError, why:
824 except OSError, why:
825 if why.errno == errno.EEXIST:
825 if why.errno == errno.EEXIST:
826 raise
826 raise
827 else:
827 else:
828 _makelock_file(info, pathname)
828 _makelock_file(info, pathname)
829
829
830 def readlock(pathname):
830 def readlock(pathname):
831 try:
831 try:
832 return os.readlink(pathname)
832 return os.readlink(pathname)
833 except OSError, why:
833 except OSError, why:
834 if why.errno == errno.EINVAL:
834 if why.errno == errno.EINVAL:
835 return _readlock_file(pathname)
835 return _readlock_file(pathname)
836 else:
836 else:
837 raise
837 raise
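
# sketch: on POSIX the lock file is a dangling symlink whose target stores
# the holder, so creation is a single atomic syscall; readlock falls back to
# reading a regular file (EINVAL) for locks written via _makelock_file:
#     makelock('host:12345', '.hg/wlock')   # e.g. for the working-dir lock
#     readlock('.hg/wlock')                 # -> 'host:12345'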
838
838
839 def shellquote(s):
839 def shellquote(s):
840 return "'%s'" % s.replace("'", "'\\''")
840 return "'%s'" % s.replace("'", "'\\''")
841
841
842 def testpid(pid):
842 def testpid(pid):
843 '''return False if pid dead, True if running or not sure'''
843 '''return False if pid dead, True if running or not sure'''
844 try:
844 try:
845 os.kill(pid, 0)
845 os.kill(pid, 0)
846 return True
846 return True
847 except OSError, inst:
847 except OSError, inst:
848 return inst.errno != errno.ESRCH
848 return inst.errno != errno.ESRCH
849
849
850 def explain_exit(code):
850 def explain_exit(code):
851 """return a 2-tuple (desc, code) describing a process's status"""
851 """return a 2-tuple (desc, code) describing a process's status"""
852 if os.WIFEXITED(code):
852 if os.WIFEXITED(code):
853 val = os.WEXITSTATUS(code)
853 val = os.WEXITSTATUS(code)
854 return _("exited with status %d") % val, val
854 return _("exited with status %d") % val, val
855 elif os.WIFSIGNALED(code):
855 elif os.WIFSIGNALED(code):
856 val = os.WTERMSIG(code)
856 val = os.WTERMSIG(code)
857 return _("killed by signal %d") % val, val
857 return _("killed by signal %d") % val, val
858 elif os.WIFSTOPPED(code):
858 elif os.WIFSTOPPED(code):
859 val = os.WSTOPSIG(code)
859 val = os.WSTOPSIG(code)
860 return _("stopped by signal %d") % val, val
860 return _("stopped by signal %d") % val, val
861 raise ValueError(_("invalid exit code"))
861 raise ValueError(_("invalid exit code"))
862
862
863 def isowner(fp, st=None):
863 def isowner(fp, st=None):
864 """Return True if the file object f belongs to the current user.
864 """Return True if the file object f belongs to the current user.
865
865
866 The return value of a util.fstat(f) may be passed as the st argument.
866 The return value of a util.fstat(f) may be passed as the st argument.
867 """
867 """
868 if st is None:
868 if st is None:
869 st = fstat(f)
869 st = fstat(f)
870 return st.st_uid == os.getuid()
870 return st.st_uid == os.getuid()
871
871
872 def _buildencodefun():
872 def _buildencodefun():
873 e = '_'
873 e = '_'
874 win_reserved = [ord(x) for x in '|\\?*<":>+[]']
874 win_reserved = [ord(x) for x in '|\\?*<":>+[]']
875 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
875 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
876 for x in (range(32) + range(126, 256) + win_reserved):
876 for x in (range(32) + range(126, 256) + win_reserved):
877 cmap[chr(x)] = "~%02x" % x
877 cmap[chr(x)] = "~%02x" % x
878 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
878 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
879 cmap[chr(x)] = e + chr(x).lower()
879 cmap[chr(x)] = e + chr(x).lower()
880 dmap = {}
880 dmap = {}
881 for k, v in cmap.iteritems():
881 for k, v in cmap.iteritems():
882 dmap[v] = k
882 dmap[v] = k
883 def decode(s):
883 def decode(s):
884 i = 0
884 i = 0
885 while i < len(s):
885 while i < len(s):
886 for l in xrange(1, 4):
886 for l in xrange(1, 4):
887 try:
887 try:
888 yield dmap[s[i:i+l]]
888 yield dmap[s[i:i+l]]
889 i += l
889 i += l
890 break
890 break
891 except KeyError:
891 except KeyError:
892 pass
892 pass
893 else:
893 else:
894 raise KeyError
894 raise KeyError
895 return (lambda s: "".join([cmap[c] for c in s]),
895 return (lambda s: "".join([cmap[c] for c in s]),
896 lambda s: "".join(list(decode(s))))
896 lambda s: "".join(list(decode(s))))
897
897
898 encodefilename, decodefilename = _buildencodefun()
898 encodefilename, decodefilename = _buildencodefun()
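
# a minimal sketch of the store encoding built above (assuming this file is
# importable as mercurial's util module):
#     import util
#     util.encodefilename('data/FOO/Bar.txt.i')  # -> 'data/_f_o_o/_bar.txt.i'
#     util.encodefilename('data/who?.i')         # -> 'data/who~3f.i'
#     # uppercase and '_' are escaped with '_'; control bytes, bytes >= 126
#     # and the Windows-reserved set |\?*<":>+[] become '~XX'; decoding is
#     # the exact inverse:
#     s = 'Mixed_Case [x].i'
#     assert util.decodefilename(util.encodefilename(s)) == s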
899
899
900 def encodedopener(openerfn, fn):
901 def o(path, *args, **kw):
902 return openerfn(fn(path), *args, **kw)
903 return o
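
# usage sketch: wrap a store opener so callers keep using plain names while
# the encoded form hits the disk (how the repository layer is expected to
# use this for .hg/store):
#     sopener = encodedopener(opener('.hg/store'), encodefilename)
#     sopener('data/FOO.i', 'w')   # actually creates .hg/store/data/_f_o_o.i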
900
904
901 def opener(base, audit=True):
905 def opener(base, audit=True):
902 """
906 """
903 return a function that opens files relative to base
907 return a function that opens files relative to base
904
908
905 this function is used to hide the details of COW semantics and
909 this function is used to hide the details of COW semantics and
906 remote file access from higher level code.
910 remote file access from higher level code.
907 """
911 """
908 p = base
912 p = base
909 audit_p = audit
913 audit_p = audit
910
914
911 def mktempcopy(name):
915 def mktempcopy(name):
912 d, fn = os.path.split(name)
916 d, fn = os.path.split(name)
913 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
917 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
914 os.close(fd)
918 os.close(fd)
915 ofp = posixfile(temp, "wb")
919 ofp = posixfile(temp, "wb")
916 try:
920 try:
917 try:
921 try:
918 ifp = posixfile(name, "rb")
922 ifp = posixfile(name, "rb")
919 except IOError, inst:
923 except IOError, inst:
920 if not getattr(inst, 'filename', None):
924 if not getattr(inst, 'filename', None):
921 inst.filename = name
925 inst.filename = name
922 raise
926 raise
923 for chunk in filechunkiter(ifp):
927 for chunk in filechunkiter(ifp):
924 ofp.write(chunk)
928 ofp.write(chunk)
925 ifp.close()
929 ifp.close()
926 ofp.close()
930 ofp.close()
927 except:
931 except:
928 try: os.unlink(temp)
932 try: os.unlink(temp)
929 except: pass
933 except: pass
930 raise
934 raise
931 st = os.lstat(name)
935 st = os.lstat(name)
932 os.chmod(temp, st.st_mode)
936 os.chmod(temp, st.st_mode)
933 return temp
937 return temp
934
938
935 class atomictempfile(posixfile):
939 class atomictempfile(posixfile):
936 """the file will only be copied when rename is called"""
940 """the file will only be copied when rename is called"""
937 def __init__(self, name, mode):
941 def __init__(self, name, mode):
938 self.__name = name
942 self.__name = name
939 self.temp = mktempcopy(name)
943 self.temp = mktempcopy(name)
940 posixfile.__init__(self, self.temp, mode)
944 posixfile.__init__(self, self.temp, mode)
941 def rename(self):
945 def rename(self):
942 if not self.closed:
946 if not self.closed:
943 posixfile.close(self)
947 posixfile.close(self)
944 rename(self.temp, localpath(self.__name))
948 rename(self.temp, localpath(self.__name))
945 def __del__(self):
949 def __del__(self):
946 if not self.closed:
950 if not self.closed:
947 try:
951 try:
948 os.unlink(self.temp)
952 os.unlink(self.temp)
949 except: pass
953 except: pass
950 posixfile.close(self)
954 posixfile.close(self)
951
955
952 class atomicfile(atomictempfile):
956 class atomicfile(atomictempfile):
953 """the file will only be copied on close"""
957 """the file will only be copied on close"""
954 def __init__(self, name, mode):
958 def __init__(self, name, mode):
955 atomictempfile.__init__(self, name, mode)
959 atomictempfile.__init__(self, name, mode)
956 def close(self):
960 def close(self):
957 self.rename()
961 self.rename()
958 def __del__(self):
962 def __del__(self):
959 self.rename()
963 self.rename()
960
964
961 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
965 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
962 if audit_p:
966 if audit_p:
963 audit_path(path)
967 audit_path(path)
964 f = os.path.join(p, path)
968 f = os.path.join(p, path)
965
969
966 if not text:
970 if not text:
967 mode += "b" # for that other OS
971 mode += "b" # for that other OS
968
972
969 if mode[0] != "r":
973 if mode[0] != "r":
970 try:
974 try:
971 nlink = nlinks(f)
975 nlink = nlinks(f)
972 except OSError:
976 except OSError:
973 d = os.path.dirname(f)
977 d = os.path.dirname(f)
974 if not os.path.isdir(d):
978 if not os.path.isdir(d):
975 os.makedirs(d)
979 os.makedirs(d)
976 else:
980 else:
977 if atomic:
981 if atomic:
978 return atomicfile(f, mode)
982 return atomicfile(f, mode)
979 elif atomictemp:
983 elif atomictemp:
980 return atomictempfile(f, mode)
984 return atomictempfile(f, mode)
981 if nlink > 1:
985 if nlink > 1:
982 rename(mktempcopy(f), f)
986 rename(mktempcopy(f), f)
983 return posixfile(f, mode)
987 return posixfile(f, mode)
984
988
985 return o
989 return o
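
# behaviour sketch for writes:
#     o = opener('.hg/store')
#     o('data/x.i', 'a')   # breaks hardlinks first: nlink > 1 (e.g. after a
#                          # local clone) triggers a private mktempcopy
#     f = o('00changelog.i', 'w', atomictemp=True)
#     # data lands under the real name only when f.rename() is called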
986
990
987 class chunkbuffer(object):
991 class chunkbuffer(object):
988 """Allow arbitrary sized chunks of data to be efficiently read from an
992 """Allow arbitrary sized chunks of data to be efficiently read from an
989 iterator over chunks of arbitrary size."""
993 iterator over chunks of arbitrary size."""
990
994
991 def __init__(self, in_iter, targetsize = 2**16):
995 def __init__(self, in_iter, targetsize = 2**16):
992 """in_iter is the iterator that's iterating over the input chunks.
996 """in_iter is the iterator that's iterating over the input chunks.
993 targetsize is how big a buffer to try to maintain."""
997 targetsize is how big a buffer to try to maintain."""
994 self.in_iter = iter(in_iter)
998 self.in_iter = iter(in_iter)
995 self.buf = ''
999 self.buf = ''
996 self.targetsize = int(targetsize)
1000 self.targetsize = int(targetsize)
997 if self.targetsize <= 0:
1001 if self.targetsize <= 0:
998 raise ValueError(_("targetsize must be greater than 0, was %d") %
1002 raise ValueError(_("targetsize must be greater than 0, was %d") %
999 targetsize)
1003 targetsize)
1000 self.iterempty = False
1004 self.iterempty = False
1001
1005
1002 def fillbuf(self):
1006 def fillbuf(self):
1003 """Ignore target size; read every chunk from iterator until empty."""
1007 """Ignore target size; read every chunk from iterator until empty."""
1004 if not self.iterempty:
1008 if not self.iterempty:
1005 collector = cStringIO.StringIO()
1009 collector = cStringIO.StringIO()
1006 collector.write(self.buf)
1010 collector.write(self.buf)
1007 for ch in self.in_iter:
1011 for ch in self.in_iter:
1008 collector.write(ch)
1012 collector.write(ch)
1009 self.buf = collector.getvalue()
1013 self.buf = collector.getvalue()
1010 self.iterempty = True
1014 self.iterempty = True
1011
1015
1012 def read(self, l):
1016 def read(self, l):
1013 """Read L bytes of data from the iterator of chunks of data.
1017 """Read L bytes of data from the iterator of chunks of data.
1014 Returns less than L bytes if the iterator runs dry."""
1018 Returns less than L bytes if the iterator runs dry."""
1015 if l > len(self.buf) and not self.iterempty:
1019 if l > len(self.buf) and not self.iterempty:
1016 # Clamp to a multiple of self.targetsize
1020 # Clamp to a multiple of self.targetsize
1017 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1021 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1018 collector = cStringIO.StringIO()
1022 collector = cStringIO.StringIO()
1019 collector.write(self.buf)
1023 collector.write(self.buf)
1020 collected = len(self.buf)
1024 collected = len(self.buf)
1021 for chunk in self.in_iter:
1025 for chunk in self.in_iter:
1022 collector.write(chunk)
1026 collector.write(chunk)
1023 collected += len(chunk)
1027 collected += len(chunk)
1024 if collected >= targetsize:
1028 if collected >= targetsize:
1025 break
1029 break
1026 if collected < targetsize:
1030 if collected < targetsize:
1027 self.iterempty = True
1031 self.iterempty = True
1028 self.buf = collector.getvalue()
1032 self.buf = collector.getvalue()
1029 s, self.buf = self.buf[:l], buffer(self.buf, l)
1033 s, self.buf = self.buf[:l], buffer(self.buf, l)
1030 return s
1034 return s
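
# behaviour sketch (Python 2 only -- read() hands back slices via buffer()):
#     cb = chunkbuffer(iter(['ab', 'cde', 'f']), targetsize=4)
#     cb.read(4)   # -> 'abcd'
#     cb.read(4)   # -> 'ef' (iterator ran dry, short read)
#     cb.read(4)   # -> ''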
1031
1035
1032 def filechunkiter(f, size=65536, limit=None):
1036 def filechunkiter(f, size=65536, limit=None):
1033 """Create a generator that produces the data in the file size
1037 """Create a generator that produces the data in the file size
1034 (default 65536) bytes at a time, up to optional limit (default is
1038 (default 65536) bytes at a time, up to optional limit (default is
1035 to read all data). Chunks may be less than size bytes if the
1039 to read all data). Chunks may be less than size bytes if the
1036 chunk is the last chunk in the file, or the file is a socket or
1040 chunk is the last chunk in the file, or the file is a socket or
1037 some other type of file that sometimes reads less data than is
1041 some other type of file that sometimes reads less data than is
1038 requested."""
1042 requested."""
1039 assert size >= 0
1043 assert size >= 0
1040 assert limit is None or limit >= 0
1044 assert limit is None or limit >= 0
1041 while True:
1045 while True:
1042 if limit is None: nbytes = size
1046 if limit is None: nbytes = size
1043 else: nbytes = min(limit, size)
1047 else: nbytes = min(limit, size)
1044 s = nbytes and f.read(nbytes)
1048 s = nbytes and f.read(nbytes)
1045 if not s: break
1049 if not s: break
1046 if limit: limit -= len(s)
1050 if limit: limit -= len(s)
1047 yield s
1051 yield s
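
# usage sketch: read at most 64 KB of a file, 8 KB at a time:
#     for chunk in filechunkiter(posixfile('big.bin', 'rb'),
#                                size=8192, limit=65536):
#         pass   # at most 8 chunks of at most 8192 bytes each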
1048
1052
1049 def makedate():
1053 def makedate():
1050 lt = time.localtime()
1054 lt = time.localtime()
1051 if lt[8] == 1 and time.daylight:
1055 if lt[8] == 1 and time.daylight:
1052 tz = time.altzone
1056 tz = time.altzone
1053 else:
1057 else:
1054 tz = time.timezone
1058 tz = time.timezone
1055 return time.mktime(lt), tz
1059 return time.mktime(lt), tz
1056
1060
1057 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1061 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1058 """represent a (unixtime, offset) tuple as a localized time.
1062 """represent a (unixtime, offset) tuple as a localized time.
1059 unixtime is seconds since the epoch, and offset is the time zone's
1063 unixtime is seconds since the epoch, and offset is the time zone's
1060 number of seconds away from UTC. if timezone is false, do not
1064 number of seconds away from UTC. if timezone is false, do not
1061 append time zone to string."""
1065 append time zone to string."""
1062 t, tz = date or makedate()
1066 t, tz = date or makedate()
1063 s = time.strftime(format, time.gmtime(float(t) - tz))
1067 s = time.strftime(format, time.gmtime(float(t) - tz))
1064 if timezone:
1068 if timezone:
1065 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1069 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1066 return s
1070 return s
1067
1071
1068 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
1072 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
1069 """parse a localized time string and return a (unixtime, offset) tuple.
1073 """parse a localized time string and return a (unixtime, offset) tuple.
1070 if the string cannot be parsed, ValueError is raised."""
1074 if the string cannot be parsed, ValueError is raised."""
1071 def hastimezone(string):
1075 def hastimezone(string):
1072 return (string[-4:].isdigit() and
1076 return (string[-4:].isdigit() and
1073 (string[-5] == '+' or string[-5] == '-') and
1077 (string[-5] == '+' or string[-5] == '-') and
1074 string[-6].isspace())
1078 string[-6].isspace())
1075
1079
1076 # NOTE: unixtime = localunixtime + offset
1080 # NOTE: unixtime = localunixtime + offset
1077 if hastimezone(string):
1081 if hastimezone(string):
1078 date, tz = string[:-6], string[-5:]
1082 date, tz = string[:-6], string[-5:]
1079 tz = int(tz)
1083 tz = int(tz)
1080 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1084 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1081 else:
1085 else:
1082 date, offset = string, None
1086 date, offset = string, None
1083 timetuple = time.strptime(date, format)
1087 timetuple = time.strptime(date, format)
1084 localunixtime = int(calendar.timegm(timetuple))
1088 localunixtime = int(calendar.timegm(timetuple))
1085 if offset is None:
1089 if offset is None:
1086 # local timezone
1090 # local timezone
1087 unixtime = int(time.mktime(timetuple))
1091 unixtime = int(time.mktime(timetuple))
1088 offset = unixtime - localunixtime
1092 offset = unixtime - localunixtime
1089 else:
1093 else:
1090 unixtime = localunixtime + offset
1094 unixtime = localunixtime + offset
1091 return unixtime, offset
1095 return unixtime, offset
1092
1096
1093 def parsedate(string, formats=None):
1097 def parsedate(string, formats=None):
1094 """parse a localized time string and return a (unixtime, offset) tuple.
1098 """parse a localized time string and return a (unixtime, offset) tuple.
1095 The date may be a "unixtime offset" string or in one of the specified
1099 The date may be a "unixtime offset" string or in one of the specified
1096 formats."""
1100 formats."""
1097 if not formats:
1101 if not formats:
1098 formats = defaultdateformats
1102 formats = defaultdateformats
1099 try:
1103 try:
1100 when, offset = map(int, string.split(' '))
1104 when, offset = map(int, string.split(' '))
1101 except ValueError:
1105 except ValueError:
1102 for format in formats:
1106 for format in formats:
1103 try:
1107 try:
1104 when, offset = strdate(string, format)
1108 when, offset = strdate(string, format)
1105 except ValueError:
1109 except ValueError:
1106 pass
1110 pass
1107 else:
1111 else:
1108 break
1112 break
1109 else:
1113 else:
1110 raise ValueError(_('invalid date: %r '
1114 raise ValueError(_('invalid date: %r '
1111 'see hg(1) manual page for details')
1115 'see hg(1) manual page for details')
1112 % string)
1116 % string)
1113 # validate explicit (probably user-specified) date and
1117 # validate explicit (probably user-specified) date and
1114 # time zone offset. values must fit in signed 32 bits for
1118 # time zone offset. values must fit in signed 32 bits for
1115 # current 32-bit linux runtimes. timezones go from UTC-12
1119 # current 32-bit linux runtimes. timezones go from UTC-12
1116 # to UTC+14
1120 # to UTC+14
1117 if abs(when) > 0x7fffffff:
1121 if abs(when) > 0x7fffffff:
1118 raise ValueError(_('date exceeds 32 bits: %d') % when)
1122 raise ValueError(_('date exceeds 32 bits: %d') % when)
1119 if offset < -50400 or offset > 43200:
1123 if offset < -50400 or offset > 43200:
1120 raise ValueError(_('impossible time zone offset: %d') % offset)
1124 raise ValueError(_('impossible time zone offset: %d') % offset)
1121 return when, offset
1125 return when, offset
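
# worked example: the raw "unixtime offset" form is what the test scripts
# below pass to commit -d:
#     parsedate('1000000 0')  # -> (1000000, 0)
#     datestr((1000000, 0))   # -> 'Mon Jan 12 13:46:40 1970 +0000'
#     strdate('Mon Jan 12 13:46:40 1970 +0000')   # -> (1000000, 0)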
1122
1126
1123 def shortuser(user):
1127 def shortuser(user):
1124 """Return a short representation of a user name or email address."""
1128 """Return a short representation of a user name or email address."""
1125 f = user.find('@')
1129 f = user.find('@')
1126 if f >= 0:
1130 if f >= 0:
1127 user = user[:f]
1131 user = user[:f]
1128 f = user.find('<')
1132 f = user.find('<')
1129 if f >= 0:
1133 if f >= 0:
1130 user = user[f+1:]
1134 user = user[f+1:]
1131 f = user.find(' ')
1135 f = user.find(' ')
1132 if f >= 0:
1136 if f >= 0:
1133 user = user[:f]
1137 user = user[:f]
1134 f = user.find('.')
1138 f = user.find('.')
1135 if f >= 0:
1139 if f >= 0:
1136 user = user[:f]
1140 user = user[:f]
1137 return user
1141 return user
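
# worked examples:
#     shortuser('Matt Mackall <mpm@selenic.com>')  # -> 'mpm'
#     shortuser('first.last@example.com')          # -> 'first'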
1138
1142
1139 def ellipsis(text, maxlength=400):
1143 def ellipsis(text, maxlength=400):
1140 """Trim string to at most maxlength (default: 400) characters."""
1144 """Trim string to at most maxlength (default: 400) characters."""
1141 if len(text) <= maxlength:
1145 if len(text) <= maxlength:
1142 return text
1146 return text
1143 else:
1147 else:
1144 return "%s..." % (text[:maxlength-3])
1148 return "%s..." % (text[:maxlength-3])
1145
1149
1146 def walkrepos(path):
1150 def walkrepos(path):
1147 '''yield every hg repository under path, recursively.'''
1151 '''yield every hg repository under path, recursively.'''
1148 def errhandler(err):
1152 def errhandler(err):
1149 if err.filename == path:
1153 if err.filename == path:
1150 raise err
1154 raise err
1151
1155
1152 for root, dirs, files in os.walk(path, onerror=errhandler):
1156 for root, dirs, files in os.walk(path, onerror=errhandler):
1153 for d in dirs:
1157 for d in dirs:
1154 if d == '.hg':
1158 if d == '.hg':
1155 yield root
1159 yield root
1156 dirs[:] = []
1160 dirs[:] = []
1157 break
1161 break
1158
1162
1159 _rcpath = None
1163 _rcpath = None
1160
1164
1161 def rcpath():
1165 def rcpath():
1162 '''return hgrc search path. if env var HGRCPATH is set, use it.
1166 '''return hgrc search path. if env var HGRCPATH is set, use it.
1163 for each item in path, if directory, use files ending in .rc,
1167 for each item in path, if directory, use files ending in .rc,
1164 else use item.
1168 else use item.
1165 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1169 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1166 if no HGRCPATH, use default os-specific path.'''
1170 if no HGRCPATH, use default os-specific path.'''
1167 global _rcpath
1171 global _rcpath
1168 if _rcpath is None:
1172 if _rcpath is None:
1169 if 'HGRCPATH' in os.environ:
1173 if 'HGRCPATH' in os.environ:
1170 _rcpath = []
1174 _rcpath = []
1171 for p in os.environ['HGRCPATH'].split(os.pathsep):
1175 for p in os.environ['HGRCPATH'].split(os.pathsep):
1172 if not p: continue
1176 if not p: continue
1173 if os.path.isdir(p):
1177 if os.path.isdir(p):
1174 for f in os.listdir(p):
1178 for f in os.listdir(p):
1175 if f.endswith('.rc'):
1179 if f.endswith('.rc'):
1176 _rcpath.append(os.path.join(p, f))
1180 _rcpath.append(os.path.join(p, f))
1177 else:
1181 else:
1178 _rcpath.append(p)
1182 _rcpath.append(p)
1179 else:
1183 else:
1180 _rcpath = os_rcpath()
1184 _rcpath = os_rcpath()
1181 return _rcpath
1185 return _rcpath
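
# behaviour sketch:
#     HGRCPATH=/etc/hgrc.d:/home/u/.hgrc  -> every '*.rc' file in
#         /etc/hgrc.d, then the literal path /home/u/.hgrc
#     HGRCPATH= (set but empty)           -> no global rc files at all; only
#         a repository's own .hg/hgrc still applies
#     unset                               -> os_rcpath() defaults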
1182
1186
1183 def bytecount(nbytes):
1187 def bytecount(nbytes):
1184 '''return byte count formatted as readable string, with units'''
1188 '''return byte count formatted as readable string, with units'''
1185
1189
1186 units = (
1190 units = (
1187 (100, 1<<30, _('%.0f GB')),
1191 (100, 1<<30, _('%.0f GB')),
1188 (10, 1<<30, _('%.1f GB')),
1192 (10, 1<<30, _('%.1f GB')),
1189 (1, 1<<30, _('%.2f GB')),
1193 (1, 1<<30, _('%.2f GB')),
1190 (100, 1<<20, _('%.0f MB')),
1194 (100, 1<<20, _('%.0f MB')),
1191 (10, 1<<20, _('%.1f MB')),
1195 (10, 1<<20, _('%.1f MB')),
1192 (1, 1<<20, _('%.2f MB')),
1196 (1, 1<<20, _('%.2f MB')),
1193 (100, 1<<10, _('%.0f KB')),
1197 (100, 1<<10, _('%.0f KB')),
1194 (10, 1<<10, _('%.1f KB')),
1198 (10, 1<<10, _('%.1f KB')),
1195 (1, 1<<10, _('%.2f KB')),
1199 (1, 1<<10, _('%.2f KB')),
1196 (1, 1, _('%.0f bytes')),
1200 (1, 1, _('%.0f bytes')),
1197 )
1201 )
1198
1202
1199 for multiplier, divisor, format in units:
1203 for multiplier, divisor, format in units:
1200 if nbytes >= divisor * multiplier:
1204 if nbytes >= divisor * multiplier:
1201 return format % (nbytes / float(divisor))
1205 return format % (nbytes / float(divisor))
1202 return units[-1][2] % nbytes
1206 return units[-1][2] % nbytes
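
# worked examples (three significant digits per unit step):
#     bytecount(1234)        # -> '1.21 KB'
#     bytecount(20 * 2**20)  # -> '20.0 MB'
#     bytecount(0)           # -> '0 bytes'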
1203
1207
1204 def drop_scheme(scheme, path):
1208 def drop_scheme(scheme, path):
1205 sc = scheme + ':'
1209 sc = scheme + ':'
1206 if path.startswith(sc):
1210 if path.startswith(sc):
1207 path = path[len(sc):]
1211 path = path[len(sc):]
1208 if path.startswith('//'):
1212 if path.startswith('//'):
1209 path = path[2:]
1213 path = path[2:]
1210 return path
1214 return path
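
# worked examples:
#     drop_scheme('file', 'file:///tmp/repo')  # -> '/tmp/repo'
#     drop_scheme('file', '/tmp/repo')         # -> '/tmp/repo' (unchanged)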
@@ -1,113 +1,113 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0" -d "1000000 0"
9 hg commit -m "0.0" -d "1000000 0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1" -d "1000000 0"
13 hg commit -m "0.1" -d "1000000 0"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2" -d "1000000 0"
17 hg commit -m "0.2" -d "1000000 0"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3" -d "1000000 0"
21 hg commit -m "0.3" -d "1000000 0"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1" -d "1000000 0"
26 hg commit -m "1.1" -d "1000000 0"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2" -d "1000000 0"
30 hg commit -m "1.2" -d "1000000 0"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3" -d "1000000 0"
38 hg commit -m "1.3" -d "1000000 0"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m" -d "1000000 0"
40 hg commit -m "1.3m" -d "1000000 0"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m" -d "1000000 0"
43 hg commit -m "0.3m" -d "1000000 0"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
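# note: with the store layout exercised here, revlogs live one level deeper
# (.hg/store/00changelog.i, .hg/store/00manifest.i, .hg/store/data/...), and
# names under data/ presumably pass through util.encodefilename, so a file
# tracked as FOO would be stored as data/_f_o_o.i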
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 mkdir test-"$i"
52 mkdir test-"$i"
53 hg --cwd test-"$i" init
53 hg --cwd test-"$i" init
54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
55 cd test-"$i"
55 cd test-"$i"
56 hg unbundle ../test-"$i".hg
56 hg unbundle ../test-"$i".hg
57 hg verify
57 hg verify
58 hg tip -q
58 hg tip -q
59 cd ..
59 cd ..
60 done
60 done
61 cd test-8
61 cd test-8
62 hg pull ../test-7
62 hg pull ../test-7
63 hg verify
63 hg verify
64 hg rollback
64 hg rollback
65 cd ..
65 cd ..
66
66
67 echo % should fail
67 echo % should fail
68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
69 hg -R test bundle -r tip test-bundle-branch1.hg
69 hg -R test bundle -r tip test-bundle-branch1.hg
70
70
71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
73 hg -R test bundle --base 2 test-bundle-all.hg
73 hg -R test bundle --base 2 test-bundle-all.hg
74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
75
75
76 # issue76 msg2163
76 # issue76 msg2163
77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
78
78
79 hg clone test-2 test-9
79 hg clone test-2 test-9
80 cd test-9
80 cd test-9
81 echo % 2
81 echo % 2
82 hg tip -q
82 hg tip -q
83 hg unbundle ../test-bundle-should-fail.hg
83 hg unbundle ../test-bundle-should-fail.hg
84 echo % 2
84 echo % 2
85 hg tip -q
85 hg tip -q
86 hg unbundle ../test-bundle-all.hg
86 hg unbundle ../test-bundle-all.hg
87 echo % 8
87 echo % 8
88 hg tip -q
88 hg tip -q
89 hg verify
89 hg verify
90 hg rollback
90 hg rollback
91 echo % 2
91 echo % 2
92 hg tip -q
92 hg tip -q
93 hg unbundle ../test-bundle-branch1.hg
93 hg unbundle ../test-bundle-branch1.hg
94 echo % 4
94 echo % 4
95 hg tip -q
95 hg tip -q
96 hg verify
96 hg verify
97 hg rollback
97 hg rollback
98 hg unbundle ../test-bundle-branch2.hg
98 hg unbundle ../test-bundle-branch2.hg
99 echo % 6
99 echo % 6
100 hg tip -q
100 hg tip -q
101 hg verify
101 hg verify
102
102
103 cd ../test
103 cd ../test
104 hg merge 7
104 hg merge 7
105 hg ci -m merge -d "1000000 0"
105 hg ci -m merge -d "1000000 0"
106 cd ..
106 cd ..
107 hg -R test bundle --base 2 test-bundle-head.hg
107 hg -R test bundle --base 2 test-bundle-head.hg
108 hg clone test-2 test-10
108 hg clone test-2 test-10
109 cd test-10
109 cd test-10
110 hg unbundle ../test-bundle-head.hg
110 hg unbundle ../test-bundle-head.hg
111 echo % 9
111 echo % 9
112 hg tip -q
112 hg tip -q
113 hg verify
113 hg verify
@@ -1,59 +1,59 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0"
9 hg commit -m "0.0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1"
13 hg commit -m "0.1"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2"
17 hg commit -m "0.2"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3"
21 hg commit -m "0.3"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1"
26 hg commit -m "1.1"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2"
30 hg commit -m "1.2"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3"
38 hg commit -m "1.3"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m"
40 hg commit -m "1.3m"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m"
43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 hg clone -r "$i" test test-"$i"
52 hg clone -r "$i" test test-"$i"
53 cd test-"$i"
53 cd test-"$i"
54 hg verify
54 hg verify
55 cd ..
55 cd ..
56 done
56 done
57 cd test-8
57 cd test-8
58 hg pull ../test-7
58 hg pull ../test-7
59 hg verify
59 hg verify
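The path changes in this hunk are the point of the whole commit: revlogs now
live under .hg/store instead of directly under .hg. A quick way to see the
new layout in a scratch repository (exact file lists may vary by version):

    hg init demo
    ls demo/.hg          # 00changelog.i  requires  store  ...
    ls demo/.hg/store    # 00changelog.i  -- the real changelog lives here

The 00changelog.i left at the top level is a placeholder in an unrecognized
revlog format, so pre-store versions of hg abort cleanly instead of
misreading the repository (see the "unknown format 2" expected output
further down).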
@@ -1,14 +1,14 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init dir
3 hg init dir
4 cd dir
4 cd dir
5 echo bleh > bar
5 echo bleh > bar
6 hg add bar
6 hg add bar
7 hg ci -m 'add bar'
7 hg ci -m 'add bar'
8
8
9 hg cp bar foo
9 hg cp bar foo
10 echo >> bar
10 echo >> bar
11 hg ci -m 'cp bar foo; change bar'
11 hg ci -m 'cp bar foo; change bar'
12
12
13 hg debugrename foo
13 hg debugrename foo
14 hg debugindex .hg/data/bar.i
14 hg debugindex .hg/store/data/bar.i
@@ -1,30 +1,30 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo a > a
4 echo a > a
5 hg add a
5 hg add a
6 hg commit -m "1" -d "1000000 0"
6 hg commit -m "1" -d "1000000 0"
7 hg status
7 hg status
8 hg copy a b
8 hg copy a b
9 hg status
9 hg status
10 hg --debug commit -m "2" -d "1000000 0"
10 hg --debug commit -m "2" -d "1000000 0"
11 echo "we should see two history entries"
11 echo "we should see two history entries"
12 hg history -v
12 hg history -v
13 echo "we should see one log entry for a"
13 echo "we should see one log entry for a"
14 hg log a
14 hg log a
15 echo "this should show a revision linked to changeset 0"
15 echo "this should show a revision linked to changeset 0"
16 hg debugindex .hg/data/a.i
16 hg debugindex .hg/store/data/a.i
17 echo "we should see one log entry for b"
17 echo "we should see one log entry for b"
18 hg log b
18 hg log b
19 echo "this should show a revision linked to changeset 1"
19 echo "this should show a revision linked to changeset 1"
20 hg debugindex .hg/data/b.i
20 hg debugindex .hg/store/data/b.i
21
21
22 echo "this should show the rename information in the metadata"
22 echo "this should show the rename information in the metadata"
23 hg debugdata .hg/data/b.d 0 | head -3 | tail -2
23 hg debugdata .hg/store/data/b.d 0 | head -3 | tail -2
24
24
25 $TESTDIR/md5sum.py .hg/data/b.i
25 $TESTDIR/md5sum.py .hg/store/data/b.i
26 hg cat b > bsum
26 hg cat b > bsum
27 $TESTDIR/md5sum.py bsum
27 $TESTDIR/md5sum.py bsum
28 hg cat a > asum
28 hg cat a > asum
29 $TESTDIR/md5sum.py asum
29 $TESTDIR/md5sum.py asum
30 hg verify
30 hg verify
@@ -1,51 +1,51 b''
1 A b
1 A b
2 b
2 b
3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
4 we should see two history entries
4 we should see two history entries
5 changeset: 1:386a3cc01532
5 changeset: 1:386a3cc01532
6 tag: tip
6 tag: tip
7 user: test
7 user: test
8 date: Mon Jan 12 13:46:40 1970 +0000
8 date: Mon Jan 12 13:46:40 1970 +0000
9 files: b
9 files: b
10 description:
10 description:
11 2
11 2
12
12
13
13
14 changeset: 0:33aaa84a386b
14 changeset: 0:33aaa84a386b
15 user: test
15 user: test
16 date: Mon Jan 12 13:46:40 1970 +0000
16 date: Mon Jan 12 13:46:40 1970 +0000
17 files: a
17 files: a
18 description:
18 description:
19 1
19 1
20
20
21
21
22 we should see one log entry for a
22 we should see one log entry for a
23 changeset: 0:33aaa84a386b
23 changeset: 0:33aaa84a386b
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: 1
26 summary: 1
27
27
28 this should show a revision linked to changeset 0
28 this should show a revision linked to changeset 0
29 rev offset length base linkrev nodeid p1 p2
29 rev offset length base linkrev nodeid p1 p2
30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
31 we should see one log entry for b
31 we should see one log entry for b
32 changeset: 1:386a3cc01532
32 changeset: 1:386a3cc01532
33 tag: tip
33 tag: tip
34 user: test
34 user: test
35 date: Mon Jan 12 13:46:40 1970 +0000
35 date: Mon Jan 12 13:46:40 1970 +0000
36 summary: 2
36 summary: 2
37
37
38 this should show a revision linked to changeset 1
38 this should show a revision linked to changeset 1
39 rev offset length base linkrev nodeid p1 p2
39 rev offset length base linkrev nodeid p1 p2
40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
41 this should show the rename information in the metadata
41 this should show the rename information in the metadata
42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
43 copy: a
43 copy: a
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/data/b.i
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/store/data/b.i
45 60b725f10c9c85c70d97880dfe8191b3 bsum
45 60b725f10c9c85c70d97880dfe8191b3 bsum
46 60b725f10c9c85c70d97880dfe8191b3 asum
46 60b725f10c9c85c70d97880dfe8191b3 asum
47 checking changesets
47 checking changesets
48 checking manifests
48 checking manifests
49 crosschecking files in changesets and manifests
49 crosschecking files in changesets and manifests
50 checking files
50 checking files
51 2 files, 2 changesets, 2 total revisions
51 2 files, 2 changesets, 2 total revisions
@@ -1,41 +1,41 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo foo > foo
4 echo foo > foo
5 hg add foo
5 hg add foo
6 hg commit -m1 -d"0 0"
6 hg commit -m1 -d"0 0"
7
7
8 echo "# should show copy"
8 echo "# should show copy"
9 hg copy foo bar
9 hg copy foo bar
10 hg debugstate|grep '^copy'
10 hg debugstate|grep '^copy'
11
11
12 echo "# shouldn't show copy"
12 echo "# shouldn't show copy"
13 hg commit -m2 -d"0 0"
13 hg commit -m2 -d"0 0"
14 hg debugstate|grep '^copy'
14 hg debugstate|grep '^copy'
15
15
16 echo "# should match"
16 echo "# should match"
17 hg debugindex .hg/data/foo.i
17 hg debugindex .hg/store/data/foo.i
18 hg debugrename bar
18 hg debugrename bar
19
19
20 echo bleah > foo
20 echo bleah > foo
21 echo quux > bar
21 echo quux > bar
22 hg commit -m3 -d"0 0"
22 hg commit -m3 -d"0 0"
23
23
24 echo "# should not be renamed"
24 echo "# should not be renamed"
25 hg debugrename bar
25 hg debugrename bar
26
26
27 hg copy -f foo bar
27 hg copy -f foo bar
28 echo "# should show copy"
28 echo "# should show copy"
29 hg debugstate|grep '^copy'
29 hg debugstate|grep '^copy'
30 hg commit -m3 -d"0 0"
30 hg commit -m3 -d"0 0"
31
31
32 echo "# should show no parents for tip"
32 echo "# should show no parents for tip"
33 hg debugindex .hg/data/bar.i
33 hg debugindex .hg/store/data/bar.i
34 echo "# should match"
34 echo "# should match"
35 hg debugindex .hg/data/foo.i
35 hg debugindex .hg/store/data/foo.i
36 hg debugrename bar
36 hg debugrename bar
37
37
38 echo "# should show no copies"
38 echo "# should show no copies"
39 hg debugstate|grep '^copy'
39 hg debugstate|grep '^copy'
40
40
41 exit 0
41 exit 0
@@ -1,49 +1,49 b''
1 #!/bin/sh
1 #!/bin/sh
2 #
2 #
3 # A B
3 # A B
4 #
4 #
5 # 3 4 3
5 # 3 4 3
6 # |\/| |\
6 # |\/| |\
7 # |/\| | \
7 # |/\| | \
8 # 1 2 1 2
8 # 1 2 1 2
9 # \ / \ /
9 # \ / \ /
10 # 0 0
10 # 0 0
11 #
11 #
12 # if the result of the merge of 1 and 2
12 # if the result of the merge of 1 and 2
13 # is the same in 3 and 4, no new manifest
13 # is the same in 3 and 4, no new manifest
14 # will be created and the manifest group
14 # will be created and the manifest group
15 # will be empty during the pull
15 # will be empty during the pull
16 #
16 #
17 # (plus we test a failure where outgoing
17 # (plus we test a failure where outgoing
18 # wrongly reported the number of csets)
18 # wrongly reported the number of csets)
19 #
19 #
20
20
21 hg init a
21 hg init a
22 cd a
22 cd a
23 touch init
23 touch init
24 hg ci -A -m 0 -d "1000000 0"
24 hg ci -A -m 0 -d "1000000 0"
25 touch x y
25 touch x y
26 hg ci -A -m 1 -d "1000000 0"
26 hg ci -A -m 1 -d "1000000 0"
27 hg update 0
27 hg update 0
28 touch x y
28 touch x y
29 hg ci -A -m 2 -d "1000000 0"
29 hg ci -A -m 2 -d "1000000 0"
30 hg merge 1
30 hg merge 1
31 hg ci -A -m m1 -d "1000000 0"
31 hg ci -A -m m1 -d "1000000 0"
32 #hg log
32 #hg log
33 #hg debugindex .hg/00manifest.i
33 #hg debugindex .hg/store/00manifest.i
34 hg update -C 1
34 hg update -C 1
35 hg merge 2
35 hg merge 2
36 hg ci -A -m m2 -d "1000000 0"
36 hg ci -A -m m2 -d "1000000 0"
37 #hg log
37 #hg log
38 #hg debugindex .hg/00manifest.i
38 #hg debugindex .hg/store/00manifest.i
39
39
40 cd ..
40 cd ..
41 hg clone -r 3 a b
41 hg clone -r 3 a b
42 hg clone -r 4 a c
42 hg clone -r 4 a c
43 hg -R a outgoing b
43 hg -R a outgoing b
44 hg -R a outgoing c
44 hg -R a outgoing c
45 hg -R b outgoing c
45 hg -R b outgoing c
46 hg -R c outgoing b
46 hg -R c outgoing b
47
47
48 hg -R b pull a
48 hg -R b pull a
49 hg -R c pull a
49 hg -R c pull a
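As the header comment explains, merges 3 and 4 reuse a single manifest node
when they resolve identically, which is what makes the manifest group empty
during the pull. One way to check this by hand (hedged; --debug output
formatting may differ between versions) is to compare the manifest field of
the two changesets:

    hg -R a log --debug -r 3 | grep manifest
    hg -R a log --debug -r 4 | grep manifest
    # identical hashes here mean the pulls above transfer no manifest data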
@@ -1,34 +1,34 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4
4
5 cat > .hg/hgrc <<EOF
5 cat > .hg/hgrc <<EOF
6 [encode]
6 [encode]
7 *.gz = gunzip
7 *.gz = gunzip
8
8
9 [decode]
9 [decode]
10 *.gz = gzip
10 *.gz = gzip
11
11
12 EOF
12 EOF
13
13
14 echo "this is a test" | gzip > a.gz
14 echo "this is a test" | gzip > a.gz
15 hg add a.gz
15 hg add a.gz
16 hg ci -m "test" -d "1000000 0"
16 hg ci -m "test" -d "1000000 0"
17 echo %% no changes
17 echo %% no changes
18 hg status
18 hg status
19 touch a.gz
19 touch a.gz
20
20
21 echo %% no changes
21 echo %% no changes
22 hg status
22 hg status
23
23
24 echo %% uncompressed contents in repo
24 echo %% uncompressed contents in repo
25 hg debugdata .hg/data/a.gz.d 0
25 hg debugdata .hg/store/data/a.gz.d 0
26
26
27 echo %% uncompress our working dir copy
27 echo %% uncompress our working dir copy
28 gunzip < a.gz
28 gunzip < a.gz
29
29
30 rm a.gz
30 rm a.gz
31 hg co
31 hg co
32
32
33 echo %% uncompress our new working dir copy
33 echo %% uncompress our new working dir copy
34 gunzip < a.gz
34 gunzip < a.gz
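For readers new to filters: [encode] runs when file data enters the
repository and [decode] when it is written back to the working directory, so
the store keeps the gunzip'ed bytes while the checkout keeps a real .gz.
A sketch with a second, hypothetical file, mirroring the debugdata call
above:

    echo "more text" | gzip > b.gz
    hg add b.gz
    hg ci -m "add b.gz"
    hg debugdata .hg/store/data/b.gz.d 0   # prints "more text", not gzip bytes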
@@ -1,46 +1,46 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4
4
5 echo foo > a
5 echo foo > a
6 echo foo > b
6 echo foo > b
7 hg add a b
7 hg add a b
8
8
9 hg ci -m "test" -d "1000000 0"
9 hg ci -m "test" -d "1000000 0"
10
10
11 echo blah > a
11 echo blah > a
12
12
13 hg ci -m "branch a" -d "1000000 0"
13 hg ci -m "branch a" -d "1000000 0"
14
14
15 hg co 0
15 hg co 0
16
16
17 echo blah > b
17 echo blah > b
18
18
19 hg ci -m "branch b" -d "1000000 0"
19 hg ci -m "branch b" -d "1000000 0"
20 HGMERGE=true hg merge 1
20 HGMERGE=true hg merge 1
21
21
22 hg ci -m "merge b/a -> blah" -d "1000000 0"
22 hg ci -m "merge b/a -> blah" -d "1000000 0"
23
23
24 hg co 1
24 hg co 1
25 HGMERGE=true hg merge 2
25 HGMERGE=true hg merge 2
26 hg ci -m "merge a/b -> blah" -d "1000000 0"
26 hg ci -m "merge a/b -> blah" -d "1000000 0"
27
27
28 hg log
28 hg log
29 hg debugindex .hg/00changelog.i
29 hg debugindex .hg/store/00changelog.i
30
30
31 echo
31 echo
32
32
33 echo 1
33 echo 1
34 hg manifest --debug 1
34 hg manifest --debug 1
35 echo 2
35 echo 2
36 hg manifest --debug 2
36 hg manifest --debug 2
37 echo 3
37 echo 3
38 hg manifest --debug 3
38 hg manifest --debug 3
39 echo 4
39 echo 4
40 hg manifest --debug 4
40 hg manifest --debug 4
41
41
42 echo
42 echo
43
43
44 hg debugindex .hg/data/a.i
44 hg debugindex .hg/store/data/a.i
45
45
46 hg verify
46 hg verify
@@ -1,79 +1,79 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test makes sure that we don't mark a file as merged with its ancestor
3 # This test makes sure that we don't mark a file as merged with its ancestor
4 # when we do a merge.
4 # when we do a merge.
5
5
6 cat <<'EOF' > merge
6 cat <<'EOF' > merge
7 #!/bin/sh
7 #!/bin/sh
8 echo merging for `basename $1`
8 echo merging for `basename $1`
9 EOF
9 EOF
10 chmod +x merge
10 chmod +x merge
11
11
12 echo creating base
12 echo creating base
13 hg init a
13 hg init a
14 cd a
14 cd a
15 echo 1 > foo
15 echo 1 > foo
16 echo 1 > bar
16 echo 1 > bar
17 echo 1 > baz
17 echo 1 > baz
18 echo 1 > quux
18 echo 1 > quux
19 hg add foo bar baz quux
19 hg add foo bar baz quux
20 hg commit -m "base" -d "1000000 0"
20 hg commit -m "base" -d "1000000 0"
21
21
22 cd ..
22 cd ..
23 hg clone a b
23 hg clone a b
24
24
25 echo creating branch a
25 echo creating branch a
26 cd a
26 cd a
27 echo 2a > foo
27 echo 2a > foo
28 echo 2a > bar
28 echo 2a > bar
29 hg commit -m "branch a" -d "1000000 0"
29 hg commit -m "branch a" -d "1000000 0"
30
30
31 echo creating branch b
31 echo creating branch b
32
32
33 cd ..
33 cd ..
34 cd b
34 cd b
35 echo 2b > foo
35 echo 2b > foo
36 echo 2b > baz
36 echo 2b > baz
37 hg commit -m "branch b" -d "1000000 0"
37 hg commit -m "branch b" -d "1000000 0"
38
38
39 echo "we shouldn't have anything but n state here"
39 echo "we shouldn't have anything but n state here"
40 hg debugstate | cut -b 1-16,35-
40 hg debugstate | cut -b 1-16,35-
41
41
42 echo merging
42 echo merging
43 hg pull ../a
43 hg pull ../a
44 env HGMERGE=../merge hg merge -v
44 env HGMERGE=../merge hg merge -v
45
45
46 echo 2m > foo
46 echo 2m > foo
47 echo 2b > baz
47 echo 2b > baz
48 echo new > quux
48 echo new > quux
49
49
50 echo "we shouldn't have anything but foo in merge state here"
50 echo "we shouldn't have anything but foo in merge state here"
51 hg debugstate | cut -b 1-16,35- | grep "^m"
51 hg debugstate | cut -b 1-16,35- | grep "^m"
52
52
53 hg ci -m "merge" -d "1000000 0"
53 hg ci -m "merge" -d "1000000 0"
54
54
55 echo "main: we should have a merge here"
55 echo "main: we should have a merge here"
56 hg debugindex .hg/00changelog.i
56 hg debugindex .hg/store/00changelog.i
57
57
58 echo "log should show foo and quux changed"
58 echo "log should show foo and quux changed"
59 hg log -v -r tip
59 hg log -v -r tip
60
60
61 echo "foo: we should have a merge here"
61 echo "foo: we should have a merge here"
62 hg debugindex .hg/data/foo.i
62 hg debugindex .hg/store/data/foo.i
63
63
64 echo "bar: we shouldn't have a merge here"
64 echo "bar: we shouldn't have a merge here"
65 hg debugindex .hg/data/bar.i
65 hg debugindex .hg/store/data/bar.i
66
66
67 echo "baz: we shouldn't have a merge here"
67 echo "baz: we shouldn't have a merge here"
68 hg debugindex .hg/data/baz.i
68 hg debugindex .hg/store/data/baz.i
69
69
70 echo "quux: we shouldn't have a merge here"
70 echo "quux: we shouldn't have a merge here"
71 hg debugindex .hg/data/quux.i
71 hg debugindex .hg/store/data/quux.i
72
72
73 echo "manifest entries should match tips of all files"
73 echo "manifest entries should match tips of all files"
74 hg manifest --debug
74 hg manifest --debug
75
75
76 echo "everything should be clean now"
76 echo "everything should be clean now"
77 hg status
77 hg status
78
78
79 hg verify
79 hg verify
@@ -1,48 +1,48 b''
1 #!/bin/sh -e
1 #!/bin/sh -e
2
2
3 umask 027
3 umask 027
4 mkdir test1
4 mkdir test1
5 cd test1
5 cd test1
6
6
7 hg init
7 hg init
8 touch a b
8 touch a b
9 hg add a b
9 hg add a b
10 hg ci -m "added a b" -d "1000000 0"
10 hg ci -m "added a b" -d "1000000 0"
11
11
12 cd ..
12 cd ..
13 hg clone test1 test3
13 hg clone test1 test3
14 mkdir test2
14 mkdir test2
15 cd test2
15 cd test2
16
16
17 hg init
17 hg init
18 hg pull ../test1
18 hg pull ../test1
19 hg co
19 hg co
20 chmod +x a
20 chmod +x a
21 hg ci -m "chmod +x a" -d "1000000 0"
21 hg ci -m "chmod +x a" -d "1000000 0"
22
22
23 cd ../test1
23 cd ../test1
24 echo 123 >>a
24 echo 123 >>a
25 hg ci -m "a updated" -d "1000000 0"
25 hg ci -m "a updated" -d "1000000 0"
26
26
27 hg pull ../test2
27 hg pull ../test2
28 hg heads
28 hg heads
29 hg history
29 hg history
30
30
31 hg -v merge
31 hg -v merge
32
32
33 cd ../test3
33 cd ../test3
34 echo 123 >>b
34 echo 123 >>b
35 hg ci -m "b updated" -d "1000000 0"
35 hg ci -m "b updated" -d "1000000 0"
36
36
37 hg pull ../test2
37 hg pull ../test2
38 hg heads
38 hg heads
39 hg history
39 hg history
40
40
41 hg -v merge
41 hg -v merge
42
42
43 ls -l ../test[123]/a > foo
43 ls -l ../test[123]/a > foo
44 cut -b 1-10 < foo
44 cut -b 1-10 < foo
45
45
46 hg debugindex .hg/data/a.i
46 hg debugindex .hg/store/data/a.i
47 hg debugindex ../test2/.hg/data/a.i
47 hg debugindex ../test2/.hg/store/data/a.i
48 hg debugindex ../test1/.hg/data/a.i
48 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,78 +1,78 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init remote
3 hg init remote
4 cd remote
4 cd remote
5 echo "# creating 'remote'"
5 echo "# creating 'remote'"
6 cat >>afile <<EOF
6 cat >>afile <<EOF
7 0
7 0
8 EOF
8 EOF
9 hg add afile
9 hg add afile
10 hg commit -m "0.0"
10 hg commit -m "0.0"
11 cat >>afile <<EOF
11 cat >>afile <<EOF
12 1
12 1
13 EOF
13 EOF
14 hg commit -m "0.1"
14 hg commit -m "0.1"
15 cat >>afile <<EOF
15 cat >>afile <<EOF
16 2
16 2
17 EOF
17 EOF
18 hg commit -m "0.2"
18 hg commit -m "0.2"
19 cat >>afile <<EOF
19 cat >>afile <<EOF
20 3
20 3
21 EOF
21 EOF
22 hg commit -m "0.3"
22 hg commit -m "0.3"
23 hg update -C 0
23 hg update -C 0
24 cat >>afile <<EOF
24 cat >>afile <<EOF
25 1
25 1
26 EOF
26 EOF
27 hg commit -m "1.1"
27 hg commit -m "1.1"
28 cat >>afile <<EOF
28 cat >>afile <<EOF
29 2
29 2
30 EOF
30 EOF
31 hg commit -m "1.2"
31 hg commit -m "1.2"
32 cat >fred <<EOF
32 cat >fred <<EOF
33 a line
33 a line
34 EOF
34 EOF
35 cat >>afile <<EOF
35 cat >>afile <<EOF
36 3
36 3
37 EOF
37 EOF
38 hg add fred
38 hg add fred
39 hg commit -m "1.3"
39 hg commit -m "1.3"
40 hg mv afile adifferentfile
40 hg mv afile adifferentfile
41 hg commit -m "1.3m"
41 hg commit -m "1.3m"
42 hg update -C 3
42 hg update -C 3
43 hg mv afile anotherfile
43 hg mv afile anotherfile
44 hg commit -m "0.3m"
44 hg commit -m "0.3m"
45 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/store/data/afile.i
46 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/store/data/adifferentfile.i
47 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/store/data/anotherfile.i
48 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/store/data/fred.i
49 hg debugindex .hg/00manifest.i
49 hg debugindex .hg/store/00manifest.i
50 hg verify
50 hg verify
51 echo "# Starting server"
51 echo "# Starting server"
52 hg serve -p 20061 -d --pid-file=../hg1.pid
52 hg serve -p 20061 -d --pid-file=../hg1.pid
53 cd ..
53 cd ..
54 cat hg1.pid >> $DAEMON_PIDS
54 cat hg1.pid >> $DAEMON_PIDS
55
55
56 echo "# clone remote via stream"
56 echo "# clone remote via stream"
57 for i in 0 1 2 3 4 5 6 7 8; do
57 for i in 0 1 2 3 4 5 6 7 8; do
58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
59 if cd test-"$i"; then
59 if cd test-"$i"; then
60 hg verify
60 hg verify
61 cd ..
61 cd ..
62 fi
62 fi
63 done
63 done
64 cd test-8
64 cd test-8
65 hg pull ../test-7
65 hg pull ../test-7
66 hg verify
66 hg verify
67 cd ..
67 cd ..
68 cd test-1
68 cd test-1
69 hg pull -r 4 http://localhost:20061/ 2>&1
69 hg pull -r 4 http://localhost:20061/ 2>&1
70 hg verify
70 hg verify
71 hg pull http://localhost:20061/ 2>&1
71 hg pull http://localhost:20061/ 2>&1
72 cd ..
72 cd ..
73 cd test-2
73 cd test-2
74 hg pull -r 5 http://localhost:20061/ 2>&1
74 hg pull -r 5 http://localhost:20061/ 2>&1
75 hg verify
75 hg verify
76 hg pull http://localhost:20061/ 2>&1
76 hg pull http://localhost:20061/ 2>&1
77 hg verify
77 hg verify
78 cd ..
78 cd ..
@@ -1,16 +1,16 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 mkfifo p
4 mkfifo p
5
5
6 hg serve --stdio < p &
6 hg serve --stdio < p &
7 P=$!
7 P=$!
8 (echo lock; echo addchangegroup; sleep 5) > p &
8 (echo lock; echo addchangegroup; sleep 5) > p &
9 Q=$!
9 Q=$!
10 sleep 3
10 sleep 3
11 kill -HUP $P
11 kill -HUP $P
12 wait
12 wait
13 ls .hg
13 ls -R .hg
14
14
15
15
16
16
@@ -1,9 +1,14 b''
1 0
1 0
2 0
2 0
3 adding changesets
3 adding changesets
4 killed!
4 killed!
5 transaction abort!
5 transaction abort!
6 rollback completed
6 rollback completed
7 .hg:
7 00changelog.i
8 00changelog.i
8 journal.dirstate
9 journal.dirstate
9 requires
10 requires
11 store
12
13 .hg/store:
14 00changelog.i
@@ -1,11 +1,11 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init a
3 hg init a
4 echo a > a/a
4 echo a > a/a
5 hg --cwd a ci -A -m a
5 hg --cwd a ci -A -m a
6 hg clone a b
6 hg clone a b
7 echo b > b/b
7 echo b > b/b
8 hg --cwd b ci -A -m b
8 hg --cwd b ci -A -m b
9 chmod 100 a/.hg
9 chmod 100 a/.hg/store
10 hg --cwd b push ../a
10 hg --cwd b push ../a
11 chmod 700 a/.hg
11 chmod 700 a/.hg/store
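The chmod target moving from a/.hg to a/.hg/store is deliberate: a push's
critical writes (revlogs and the transaction journal) all land under
.hg/store, so locking down the store alone is enough to make the push abort.
A sketch (repository names hypothetical; the exact abort message depends on
the hg version):

    chmod 0100 repo/.hg/store    # store unwritable, rest of .hg untouched
    hg --cwd other push ../repo  # expected to abort: cannot write to the store
    chmod 0700 repo/.hg/store    # restore permissions so cleanup can proceed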
@@ -1,66 +1,66 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # initial
3 # initial
4 hg init test-a
4 hg init test-a
5 cd test-a
5 cd test-a
6 cat >test.txt <<"EOF"
6 cat >test.txt <<"EOF"
7 1
7 1
8 2
8 2
9 3
9 3
10 EOF
10 EOF
11 hg add test.txt
11 hg add test.txt
12 hg commit -m "Initial" -d "1000000 0"
12 hg commit -m "Initial" -d "1000000 0"
13
13
14 # clone
14 # clone
15 cd ..
15 cd ..
16 hg clone test-a test-b
16 hg clone test-a test-b
17
17
18 # change test-a
18 # change test-a
19 cd test-a
19 cd test-a
20 cat >test.txt <<"EOF"
20 cat >test.txt <<"EOF"
21 one
21 one
22 two
22 two
23 three
23 three
24 EOF
24 EOF
25 hg commit -m "Numbers as words" -d "1000000 0"
25 hg commit -m "Numbers as words" -d "1000000 0"
26
26
27 # change test-b
27 # change test-b
28 cd ../test-b
28 cd ../test-b
29 cat >test.txt <<"EOF"
29 cat >test.txt <<"EOF"
30 1
30 1
31 2.5
31 2.5
32 3
32 3
33 EOF
33 EOF
34 hg commit -m "2 -> 2.5" -d "1000000 0"
34 hg commit -m "2 -> 2.5" -d "1000000 0"
35
35
36 # now pull and merge from test-a
36 # now pull and merge from test-a
37 hg pull ../test-a
37 hg pull ../test-a
38 HGMERGE=merge hg merge
38 HGMERGE=merge hg merge
39 # resolve conflict
39 # resolve conflict
40 cat >test.txt <<"EOF"
40 cat >test.txt <<"EOF"
41 one
41 one
42 two-point-five
42 two-point-five
43 three
43 three
44 EOF
44 EOF
45 rm -f *.orig
45 rm -f *.orig
46 hg commit -m "Merge 1" -d "1000000 0"
46 hg commit -m "Merge 1" -d "1000000 0"
47
47
48 # change test-a again
48 # change test-a again
49 cd ../test-a
49 cd ../test-a
50 cat >test.txt <<"EOF"
50 cat >test.txt <<"EOF"
51 one
51 one
52 two-point-one
52 two-point-one
53 three
53 three
54 EOF
54 EOF
55 hg commit -m "two -> two-point-one" -d "1000000 0"
55 hg commit -m "two -> two-point-one" -d "1000000 0"
56
56
57 # pull and merge from test-a again
57 # pull and merge from test-a again
58 cd ../test-b
58 cd ../test-b
59 hg pull ../test-a
59 hg pull ../test-a
60 HGMERGE=merge hg merge --debug
60 HGMERGE=merge hg merge --debug
61
61
62 cat test.txt | sed "s% .*%%"
62 cat test.txt | sed "s% .*%%"
63
63
64 hg debugindex .hg/data/test.txt.i
64 hg debugindex .hg/store/data/test.txt.i
65
65
66 hg log
66 hg log
@@ -1,52 +1,52 b''
1 #!/bin/sh
1 #!/bin/sh
2 #
2 #
3 # revlog.parseindex must be able to parse the index file even if
3 # revlog.parseindex must be able to parse the index file even if
4 # an index entry is split between two 64k blocks. The ideal test
4 # an index entry is split between two 64k blocks. The ideal test
5 # would be to create an index file with inline data where
5 # would be to create an index file with inline data where
6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
7 # the size of an index entry) and with an index entry starting right
7 # the size of an index entry) and with an index entry starting right
8 # before the 64k block boundary, and try to read it.
8 # before the 64k block boundary, and try to read it.
9 #
9 #
10 # We approximate that by reducing the read buffer to 1 byte.
10 # We approximate that by reducing the read buffer to 1 byte.
11 #
11 #
12
12
13 hg init a
13 hg init a
14 cd a
14 cd a
15 echo abc > foo
15 echo abc > foo
16 hg add foo
16 hg add foo
17 hg commit -m 'add foo' -d '1000000 0'
17 hg commit -m 'add foo' -d '1000000 0'
18
18
19 echo >> foo
19 echo >> foo
20 hg commit -m 'change foo' -d '1000001 0'
20 hg commit -m 'change foo' -d '1000001 0'
21 hg log -r 0:
21 hg log -r 0:
22
22
23 cat >> test.py << EOF
23 cat >> test.py << EOF
24 from mercurial import changelog, util
24 from mercurial import changelog, util
25 from mercurial.node import *
25 from mercurial.node import *
26
26
27 class singlebyteread(object):
27 class singlebyteread(object):
28 def __init__(self, real):
28 def __init__(self, real):
29 self.real = real
29 self.real = real
30
30
31 def read(self, size=-1):
31 def read(self, size=-1):
32 if size == 65536:
32 if size == 65536:
33 size = 1
33 size = 1
34 return self.real.read(size)
34 return self.real.read(size)
35
35
36 def __getattr__(self, key):
36 def __getattr__(self, key):
37 return getattr(self.real, key)
37 return getattr(self.real, key)
38
38
39 def opener(*args):
39 def opener(*args):
40 o = util.opener(*args)
40 o = util.opener(*args)
41 def wrapper(*a):
41 def wrapper(*a):
42 f = o(*a)
42 f = o(*a)
43 return singlebyteread(f)
43 return singlebyteread(f)
44 return wrapper
44 return wrapper
45
45
46 cl = changelog.changelog(opener('.hg'))
46 cl = changelog.changelog(opener('.hg/store'))
47 print cl.count(), 'revisions:'
47 print cl.count(), 'revisions:'
48 for r in xrange(cl.count()):
48 for r in xrange(cl.count()):
49 print short(cl.node(r))
49 print short(cl.node(r))
50 EOF
50 EOF
51
51
52 python test.py
52 python test.py
@@ -1,15 +1,15 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init
3 hg init
4 echo foo > a
4 echo foo > a
5 hg add a
5 hg add a
6 hg commit -m "1" -d "1000000 0"
6 hg commit -m "1" -d "1000000 0"
7 hg verify
7 hg verify
8 chmod -r .hg/data/a.i
8 chmod -r .hg/store/data/a.i
9 hg verify 2>/dev/null || echo verify failed
9 hg verify 2>/dev/null || echo verify failed
10 chmod +r .hg/data/a.i
10 chmod +r .hg/store/data/a.i
11 hg verify 2>/dev/null || echo verify failed
11 hg verify 2>/dev/null || echo verify failed
12 chmod -w .hg/data/a.i
12 chmod -w .hg/store/data/a.i
13 echo barber > a
13 echo barber > a
14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
15
15
@@ -1,19 +1,19 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir a
3 mkdir a
4 cd a
4 cd a
5 hg init
5 hg init
6 echo foo > b
6 echo foo > b
7 hg add b
7 hg add b
8 hg ci -m "b" -d "1000000 0"
8 hg ci -m "b" -d "1000000 0"
9
9
10 chmod -w .hg
10 chmod -w .hg/store
11
11
12 cd ..
12 cd ..
13
13
14 hg clone a b
14 hg clone a b
15
15
16 chmod +w a/.hg # let test clean up
16 chmod +w a/.hg/store # let test clean up
17
17
18 cd b
18 cd b
19 hg verify
19 hg verify
@@ -1,61 +1,61 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 cat >>afile <<EOF
5 cat >>afile <<EOF
6 0
6 0
7 EOF
7 EOF
8 hg add afile
8 hg add afile
9 hg commit -m "0.0"
9 hg commit -m "0.0"
10 cat >>afile <<EOF
10 cat >>afile <<EOF
11 1
11 1
12 EOF
12 EOF
13 hg commit -m "0.1"
13 hg commit -m "0.1"
14 cat >>afile <<EOF
14 cat >>afile <<EOF
15 2
15 2
16 EOF
16 EOF
17 hg commit -m "0.2"
17 hg commit -m "0.2"
18 cat >>afile <<EOF
18 cat >>afile <<EOF
19 3
19 3
20 EOF
20 EOF
21 hg commit -m "0.3"
21 hg commit -m "0.3"
22 hg update -C 0
22 hg update -C 0
23 cat >>afile <<EOF
23 cat >>afile <<EOF
24 1
24 1
25 EOF
25 EOF
26 hg commit -m "1.1"
26 hg commit -m "1.1"
27 cat >>afile <<EOF
27 cat >>afile <<EOF
28 2
28 2
29 EOF
29 EOF
30 hg commit -m "1.2"
30 hg commit -m "1.2"
31 cat >fred <<EOF
31 cat >fred <<EOF
32 a line
32 a line
33 EOF
33 EOF
34 cat >>afile <<EOF
34 cat >>afile <<EOF
35 3
35 3
36 EOF
36 EOF
37 hg add fred
37 hg add fred
38 hg commit -m "1.3"
38 hg commit -m "1.3"
39 hg mv afile adifferentfile
39 hg mv afile adifferentfile
40 hg commit -m "1.3m"
40 hg commit -m "1.3m"
41 hg update -C 3
41 hg update -C 3
42 hg mv afile anotherfile
42 hg mv afile anotherfile
43 hg commit -m "0.3m"
43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/00manifest.i
48 hg debugindex .hg/store/00manifest.i
49 hg verify
49 hg verify
50 cd ..
50 cd ..
51 for i in 0 1 2 3 4 5 6 7 8; do
51 for i in 0 1 2 3 4 5 6 7 8; do
52 mkdir test-"$i"
52 mkdir test-"$i"
53 hg --cwd test-"$i" init
53 hg --cwd test-"$i" init
54 hg -R test push -r "$i" test-"$i"
54 hg -R test push -r "$i" test-"$i"
55 cd test-"$i"
55 cd test-"$i"
56 hg verify
56 hg verify
57 cd ..
57 cd ..
58 done
58 done
59 cd test-8
59 cd test-8
60 hg pull ../test-7
60 hg pull ../test-7
61 hg verify
61 hg verify
@@ -1,27 +1,27 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 echo "[merge]" >> .hg/hgrc
6 echo "[merge]" >> .hg/hgrc
7 echo "followcopies = 1" >> .hg/hgrc
7 echo "followcopies = 1" >> .hg/hgrc
8 echo foo > a
8 echo foo > a
9 echo foo > a2
9 echo foo > a2
10 hg add a a2
10 hg add a a2
11 hg ci -m "start" -d "0 0"
11 hg ci -m "start" -d "0 0"
12 hg mv a b
12 hg mv a b
13 hg mv a2 b2
13 hg mv a2 b2
14 hg ci -m "rename" -d "0 0"
14 hg ci -m "rename" -d "0 0"
15 echo "checkout"
15 echo "checkout"
16 hg co 0
16 hg co 0
17 echo blahblah > a
17 echo blahblah > a
18 echo blahblah > a2
18 echo blahblah > a2
19 hg mv a2 c2
19 hg mv a2 c2
20 hg ci -m "modify" -d "0 0"
20 hg ci -m "modify" -d "0 0"
21 echo "merge"
21 echo "merge"
22 hg merge -y --debug
22 hg merge -y --debug
23 hg status -AC
23 hg status -AC
24 cat b
24 cat b
25 hg ci -m "merge" -d "0 0"
25 hg ci -m "merge" -d "0 0"
26 hg debugindex .hg/data/b.i
26 hg debugindex .hg/store/data/b.i
27 hg debugrename b
\ No newline at end of file
27 hg debugrename b
@@ -1,7 +1,2 b''
1 changeset: 0:0acdaf898367
1 abort: index 00changelog.i unknown format 2!
2 tag: tip
3 user: test
4 date: Mon Jan 12 13:46:40 1970 +0000
5 summary: test
6
7 abort: requirement 'indoor-pool' not supported!
2 abort: requirement 'indoor-pool' not supported!
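This expected output covers the two guards that ship with the store layout:
old clients trip over the placeholder 00changelog.i ("unknown format 2"),
and everything else is gated by .hg/requires. The second guard is easy to
reproduce by hand (throwaway repository, name hypothetical):

    hg init t
    echo indoor-pool >> t/.hg/requires
    hg -R t tip    # abort: requirement 'indoor-pool' not supported!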
@@ -1,99 +1,100 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test tries to exercise the ssh functionality with a dummy script
3 # This test tries to exercise the ssh functionality with a dummy script
4
4
5 cat <<'EOF' > dummyssh
5 cat <<'EOF' > dummyssh
6 #!/bin/sh
6 #!/bin/sh
7 # this attempts to deal with relative pathnames
7 # this attempts to deal with relative pathnames
8 cd `dirname $0`
8 cd `dirname $0`
9
9
10 # check for proper args
10 # check for proper args
11 if [ $1 != "user@dummy" ] ; then
11 if [ $1 != "user@dummy" ] ; then
12 exit -1
12 exit -1
13 fi
13 fi
14
14
15 # check that we're in the right directory
15 # check that we're in the right directory
16 if [ ! -x dummyssh ] ; then
16 if [ ! -x dummyssh ] ; then
17 exit -1
17 exit -1
18 fi
18 fi
19
19
20 SSH_CLIENT='127.0.0.1 1 2'
20 SSH_CLIENT='127.0.0.1 1 2'
21 export SSH_CLIENT
21 export SSH_CLIENT
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 $2
23 $2
24 EOF
24 EOF
25 chmod +x dummyssh
25 chmod +x dummyssh
26
26
27 echo "# creating 'remote'"
27 echo "# creating 'remote'"
28 hg init remote
28 hg init remote
29 cd remote
29 cd remote
30 echo this > foo
30 echo this > foo
31 hg ci -A -m "init" -d "1000000 0" foo
31 echo this > fooO
32 hg ci -A -m "init" -d "1000000 0" foo fooO
32 echo '[server]' > .hg/hgrc
33 echo '[server]' > .hg/hgrc
33 echo 'uncompressed = True' >> .hg/hgrc
34 echo 'uncompressed = True' >> .hg/hgrc
34 echo '[hooks]' >> .hg/hgrc
35 echo '[hooks]' >> .hg/hgrc
35 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36
37
37 cd ..
38 cd ..
38
39
39 echo "# repo not found error"
40 echo "# repo not found error"
40 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41
42
42 echo "# clone remote via stream"
43 echo "# clone remote via stream"
43 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 cd local-stream
46 cd local-stream
46 hg verify
47 hg verify
47 cd ..
48 cd ..
48
49
49 echo "# clone remote via pull"
50 echo "# clone remote via pull"
50 hg clone -e ./dummyssh ssh://user@dummy/remote local
51 hg clone -e ./dummyssh ssh://user@dummy/remote local
51
52
52 echo "# verify"
53 echo "# verify"
53 cd local
54 cd local
54 hg verify
55 hg verify
55
56
56 echo '[hooks]' >> .hg/hgrc
57 echo '[hooks]' >> .hg/hgrc
57 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58
59
59 echo "# empty default pull"
60 echo "# empty default pull"
60 hg paths
61 hg paths
61 hg pull -e ../dummyssh
62 hg pull -e ../dummyssh
62
63
63 echo "# local change"
64 echo "# local change"
64 echo bleah > foo
65 echo bleah > foo
65 hg ci -m "add" -d "1000000 0"
66 hg ci -m "add" -d "1000000 0"
66
67
67 echo "# updating rc"
68 echo "# updating rc"
68 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 echo "[ui]" >> .hg/hgrc
70 echo "[ui]" >> .hg/hgrc
70 echo "ssh = ../dummyssh" >> .hg/hgrc
71 echo "ssh = ../dummyssh" >> .hg/hgrc
71
72
72 echo "# find outgoing"
73 echo "# find outgoing"
73 hg out ssh://user@dummy/remote
74 hg out ssh://user@dummy/remote
74
75
75 echo "# find incoming on the remote side"
76 echo "# find incoming on the remote side"
76 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77
78
78 echo "# push"
79 echo "# push"
79 hg push
80 hg push
80
81
81 cd ../remote
82 cd ../remote
82
83
83 echo "# check remote tip"
84 echo "# check remote tip"
84 hg tip
85 hg tip
85 hg verify
86 hg verify
86 hg cat -r tip foo
87 hg cat -r tip foo
87
88
88 echo z > z
89 echo z > z
89 hg ci -A -m z -d '1000001 0' z
90 hg ci -A -m z -d '1000001 0' z
90
91
91 cd ../local
92 cd ../local
92 echo r > r
93 echo r > r
93 hg ci -A -m z -d '1000002 0' r
94 hg ci -A -m z -d '1000002 0' r
94
95
95 echo "# push should succeed"
96 echo "# push should succeed"
96 hg push
97 hg push
97
98
98 cd ..
99 cd ..
99 cat dummylog
100 cat dummylog
@@ -1,99 +1,99 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 # This test tries to exercise the ssh functionality with a dummy script
3 # This test tries to exercise the ssh functionality with a dummy script
4
4
5 cat <<'EOF' > dummyssh
5 cat <<'EOF' > dummyssh
6 #!/bin/sh
6 #!/bin/sh
7 # this attempts to deal with relative pathnames
7 # this attempts to deal with relative pathnames
8 cd `dirname $0`
8 cd `dirname $0`
9
9
10 # check for proper args
10 # check for proper args
11 if [ $1 != "user@dummy" ] ; then
11 if [ $1 != "user@dummy" ] ; then
12 exit -1
12 exit -1
13 fi
13 fi
14
14
15 # check that we're in the right directory
15 # check that we're in the right directory
16 if [ ! -x dummyssh ] ; then
16 if [ ! -x dummyssh ] ; then
17 exit -1
17 exit -1
18 fi
18 fi
19
19
20 SSH_CLIENT='127.0.0.1 1 2'
20 SSH_CLIENT='127.0.0.1 1 2'
21 export SSH_CLIENT
21 export SSH_CLIENT
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 $2
23 $2
24 EOF
24 EOF
25 chmod +x dummyssh
25 chmod +x dummyssh
26
26
27 hg init remote
27 hg init remote
28 cd remote
28 cd remote
29 echo "# creating 'remote'"
29 echo "# creating 'remote'"
30 cat >>afile <<EOF
30 cat >>afile <<EOF
31 0
31 0
32 EOF
32 EOF
33 hg add afile
33 hg add afile
34 hg commit -m "0.0"
34 hg commit -m "0.0"
35 cat >>afile <<EOF
35 cat >>afile <<EOF
36 1
36 1
37 EOF
37 EOF
38 hg commit -m "0.1"
38 hg commit -m "0.1"
39 cat >>afile <<EOF
39 cat >>afile <<EOF
40 2
40 2
41 EOF
41 EOF
42 hg commit -m "0.2"
42 hg commit -m "0.2"
43 cat >>afile <<EOF
43 cat >>afile <<EOF
44 3
44 3
45 EOF
45 EOF
46 hg commit -m "0.3"
46 hg commit -m "0.3"
47 hg update -C 0
47 hg update -C 0
48 cat >>afile <<EOF
48 cat >>afile <<EOF
49 1
49 1
50 EOF
50 EOF
51 hg commit -m "1.1"
51 hg commit -m "1.1"
52 cat >>afile <<EOF
52 cat >>afile <<EOF
53 2
53 2
54 EOF
54 EOF
55 hg commit -m "1.2"
55 hg commit -m "1.2"
56 cat >fred <<EOF
56 cat >fred <<EOF
57 a line
57 a line
58 EOF
58 EOF
59 cat >>afile <<EOF
59 cat >>afile <<EOF
60 3
60 3
61 EOF
61 EOF
62 hg add fred
62 hg add fred
63 hg commit -m "1.3"
63 hg commit -m "1.3"
64 hg mv afile adifferentfile
64 hg mv afile adifferentfile
65 hg commit -m "1.3m"
65 hg commit -m "1.3m"
66 hg update -C 3
66 hg update -C 3
67 hg mv afile anotherfile
67 hg mv afile anotherfile
68 hg commit -m "0.3m"
68 hg commit -m "0.3m"
69 hg debugindex .hg/data/afile.i
69 hg debugindex .hg/store/data/afile.i
70 hg debugindex .hg/data/adifferentfile.i
70 hg debugindex .hg/store/data/adifferentfile.i
71 hg debugindex .hg/data/anotherfile.i
71 hg debugindex .hg/store/data/anotherfile.i
72 hg debugindex .hg/data/fred.i
72 hg debugindex .hg/store/data/fred.i
73 hg debugindex .hg/00manifest.i
73 hg debugindex .hg/store/00manifest.i
74 hg verify
74 hg verify
75 cd ..
75 cd ..
76
76
77 echo "# clone remote via stream"
77 echo "# clone remote via stream"
78 for i in 0 1 2 3 4 5 6 7 8; do
78 for i in 0 1 2 3 4 5 6 7 8; do
79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
80 if cd test-"$i"; then
80 if cd test-"$i"; then
81 hg verify
81 hg verify
82 cd ..
82 cd ..
83 fi
83 fi
84 done
84 done
85 cd test-8
85 cd test-8
86 hg pull ../test-7
86 hg pull ../test-7
87 hg verify
87 hg verify
88 cd ..
88 cd ..
89 cd test-1
89 cd test-1
90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
91 hg verify
91 hg verify
92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
93 cd ..
93 cd ..
94 cd test-2
94 cd test-2
95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
96 hg verify
96 hg verify
97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
98 hg verify
98 hg verify
99 cd ..
99 cd ..
@@ -1,88 +1,88 b''
1 # creating 'remote'
1 # creating 'remote'
2 # repo not found error
2 # repo not found error
3 remote: abort: repository nonexistent not found!
3 remote: abort: repository nonexistent not found!
4 abort: no suitable response from remote hg!
4 abort: no suitable response from remote hg!
5 # clone remote via stream
5 # clone remote via stream
6 streaming all changes
6 streaming all changes
7 XXX files to transfer, XXX bytes of data
7 XXX files to transfer, XXX bytes of data
8 transferred XXX bytes in XXX seconds (XXX XB/sec)
8 transferred XXX bytes in XXX seconds (XXX XB/sec)
9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
10 checking changesets
10 checking changesets
11 checking manifests
11 checking manifests
12 crosschecking files in changesets and manifests
12 crosschecking files in changesets and manifests
13 checking files
13 checking files
14 1 files, 1 changesets, 1 total revisions
14 2 files, 1 changesets, 2 total revisions
15 # clone remote via pull
15 # clone remote via pull
16 requesting all changes
16 requesting all changes
17 adding changesets
17 adding changesets
18 adding manifests
18 adding manifests
19 adding file changes
19 adding file changes
20 added 1 changesets with 1 changes to 1 files
20 added 1 changesets with 2 changes to 2 files
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 # verify
22 # verify
23 checking changesets
23 checking changesets
24 checking manifests
24 checking manifests
25 crosschecking files in changesets and manifests
25 crosschecking files in changesets and manifests
26 checking files
26 checking files
27 1 files, 1 changesets, 1 total revisions
27 2 files, 1 changesets, 2 total revisions
28 # empty default pull
28 # empty default pull
29 default = ssh://user@dummy/remote
29 default = ssh://user@dummy/remote
30 pulling from ssh://user@dummy/remote
30 pulling from ssh://user@dummy/remote
31 searching for changes
31 searching for changes
32 no changes found
32 no changes found
33 # local change
33 # local change
34 # updating rc
34 # updating rc
35 # find outgoing
35 # find outgoing
36 searching for changes
36 searching for changes
37 changeset: 1:c54836a570be
37 changeset: 1:572896fe480d
38 tag: tip
38 tag: tip
39 user: test
39 user: test
40 date: Mon Jan 12 13:46:40 1970 +0000
40 date: Mon Jan 12 13:46:40 1970 +0000
41 summary: add
41 summary: add
42
42
43 # find incoming on the remote side
43 # find incoming on the remote side
44 searching for changes
44 searching for changes
45 changeset: 1:c54836a570be
45 changeset: 1:572896fe480d
46 tag: tip
46 tag: tip
47 user: test
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: add
49 summary: add
50
50
51 # push
51 # push
52 pushing to ssh://user@dummy/remote
52 pushing to ssh://user@dummy/remote
53 searching for changes
53 searching for changes
54 remote: adding changesets
54 remote: adding changesets
55 remote: adding manifests
55 remote: adding manifests
56 remote: adding file changes
56 remote: adding file changes
57 remote: added 1 changesets with 1 changes to 1 files
57 remote: added 1 changesets with 1 changes to 1 files
58 # check remote tip
58 # check remote tip
59 changeset: 1:c54836a570be
59 changeset: 1:572896fe480d
60 tag: tip
60 tag: tip
61 user: test
61 user: test
62 date: Mon Jan 12 13:46:40 1970 +0000
62 date: Mon Jan 12 13:46:40 1970 +0000
63 summary: add
63 summary: add
64
64
65 checking changesets
65 checking changesets
66 checking manifests
66 checking manifests
67 crosschecking files in changesets and manifests
67 crosschecking files in changesets and manifests
68 checking files
68 checking files
69 1 files, 2 changesets, 2 total revisions
69 2 files, 2 changesets, 3 total revisions
70 bleah
70 bleah
71 # push should succeed
71 # push should succeed
72 pushing to ssh://user@dummy/remote
72 pushing to ssh://user@dummy/remote
73 searching for changes
73 searching for changes
74 note: unsynced remote changes!
74 note: unsynced remote changes!
75 remote: adding changesets
75 remote: adding changesets
76 remote: adding manifests
76 remote: adding manifests
77 remote: adding file changes
77 remote: adding file changes
78 remote: added 1 changesets with 1 changes to 1 files
78 remote: added 1 changesets with 1 changes to 1 files
79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
86 changegroup in remote: u=remote:ssh:127.0.0.1
86 changegroup in remote: u=remote:ssh:127.0.0.1
87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
88 changegroup in remote: u=remote:ssh:127.0.0.1
88 changegroup in remote: u=remote:ssh:127.0.0.1