switch to the .hg/store layout, fix the tests
Benoit Boissinot
r3853:c0b44915 default
mercurial/hg.py
@@ -1,266 +1,270 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
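For orientation: _lookup treats everything before the first ':' as the scheme and picks a handler from the schemes table; the 'file' entry is itself a function, which is why the except TypeError fallback exists (plain modules are not callable). A minimal sketch of the same dispatch idea, with toy handlers standing in for the real modules imported above:

    def lookup_scheme(path, handlers, default='file'):
        scheme = default
        if path:
            c = path.find(':')
            if c > 0:
                scheme = path[:c]
        handler = handlers.get(scheme) or handlers[default]
        # an entry may be a chooser function rather than a module
        if callable(handler):
            return handler(path)
        return handler

    toy = {'file': lambda p: 'local:' + p, 'http': 'httprepo'}
    print(lookup_scheme('http://host/repo', toy))  # -> 'httprepo'
    print(lookup_scheme('/tmp/repo', toy))         # -> 'local:/tmp/repo'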
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path='', create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106
107 107 def localpath(path):
108 108 if path.startswith('file://'):
109 109 return path[7:]
110 110 if path.startswith('file:'):
111 111 return path[5:]
112 112 return path
113 113
114 114 dest = localpath(dest)
115 115 source = localpath(source)
116 116
117 117 if os.path.exists(dest):
118 118 raise util.Abort(_("destination '%s' already exists") % dest)
119 119
120 120 class DirCleanup(object):
121 121 def __init__(self, dir_):
122 122 self.rmtree = shutil.rmtree
123 123 self.dir_ = dir_
124 124 def close(self):
125 125 self.dir_ = None
126 126 def __del__(self):
127 127 if self.dir_:
128 128 self.rmtree(self.dir_, True)
129 129
130 130 dir_cleanup = None
131 131 if islocal(dest):
132 132 dir_cleanup = DirCleanup(dest)
133 133
134 134 abspath = source
135 135 copy = False
136 136 if src_repo.local() and islocal(dest):
137 137 abspath = os.path.abspath(source)
138 138 copy = not pull and not rev
139 139
140 140 src_lock, dest_lock = None, None
141 141 if copy:
142 142 try:
143 143 # we use a lock here because if we race with commit, we
144 144 # can end up with extra data in the cloned revlogs that's
145 145 # not pointed to by changesets, thus causing verify to
146 146 # fail
147 147 src_lock = src_repo.lock()
148 148 except lock.LockException:
149 149 copy = False
150 150
151 151 if copy:
152 152 def force_copy(src, dst):
153 153 try:
154 154 util.copyfiles(src, dst)
155 155 except OSError, inst:
156 156 if inst.errno != errno.ENOENT:
157 157 raise
158 158
159 159 src_store = os.path.realpath(src_repo.spath)
160 dest_path = os.path.realpath(os.path.join(dest, ".hg"))
161 dest_store = dest_path
162 160 if not os.path.exists(dest):
163 161 os.mkdir(dest)
162 dest_path = os.path.realpath(os.path.join(dest, ".hg"))
164 163 os.mkdir(dest_path)
164 if src_repo.spath != src_repo.path:
165 dest_store = os.path.join(dest_path, "store")
166 os.mkdir(dest_store)
167 else:
168 dest_store = dest_path
165 169 # copy the requires file
166 170 force_copy(src_repo.join("requires"),
167 171 os.path.join(dest_path, "requires"))
168 172 # we lock here to avoid premature writing to the target
169 173 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
170 174
171 175 files = ("data",
172 176 "00manifest.d", "00manifest.i",
173 177 "00changelog.d", "00changelog.i")
174 178 for f in files:
175 179 src = os.path.join(src_store, f)
176 180 dst = os.path.join(dest_store, f)
177 181 force_copy(src, dst)
178 182
179 183 # we need to re-init the repo after manually copying the data
180 184 # into it
181 185 dest_repo = repository(ui, dest)
182 186
183 187 else:
184 188 dest_repo = repository(ui, dest, create=True)
185 189
186 190 revs = None
187 191 if rev:
188 192 if 'lookup' not in src_repo.capabilities:
189 193 raise util.Abort(_("src repository does not support revision "
190 194 "lookup and so doesn't support clone by "
191 195 "revision"))
192 196 revs = [src_repo.lookup(r) for r in rev]
193 197
194 198 if dest_repo.local():
195 199 dest_repo.clone(src_repo, heads=revs, stream=stream)
196 200 elif src_repo.local():
197 201 src_repo.push(dest_repo, revs=revs)
198 202 else:
199 203 raise util.Abort(_("clone from remote to remote not supported"))
200 204
201 205 if src_lock:
202 206 src_lock.release()
203 207
204 208 if dest_repo.local():
205 209 fp = dest_repo.opener("hgrc", "w", text=True)
206 210 fp.write("[paths]\n")
207 211 fp.write("default = %s\n" % abspath)
208 212 fp.close()
209 213
210 214 if dest_lock:
211 215 dest_lock.release()
212 216
213 217 if update:
214 218 _update(dest_repo, dest_repo.changelog.tip())
215 219 if dir_cleanup:
216 220 dir_cleanup.close()
217 221
218 222 return src_repo, dest_repo
219 223
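A usage sketch for clone (assuming a default ui object; mercurial.ui.ui() is how the rest of this codebase constructs one):

    from mercurial import ui, hg

    u = ui.ui()
    # pull=True forces a pull-based clone even when both sides are local
    src, dst = hg.clone(u, 'http://example.com/repo', 'repo-copy', pull=True)
    print(dst.root)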
220 224 def _showstats(repo, stats):
221 225 stats = ((stats[0], _("updated")),
222 226 (stats[1], _("merged")),
223 227 (stats[2], _("removed")),
224 228 (stats[3], _("unresolved")))
225 229 note = ", ".join([_("%d files %s") % s for s in stats])
226 230 repo.ui.status("%s\n" % note)
227 231
228 232 def _update(repo, node): return update(repo, node)
229 233
230 234 def update(repo, node):
231 235 """update the working directory to node, merging linear changes"""
232 236 stats = _merge.update(repo, node, False, False, None, None)
233 237 _showstats(repo, stats)
234 238 if stats[3]:
235 239 repo.ui.status(_("There are unresolved merges with"
236 240 " locally modified files.\n"))
237 241 return stats[3]
238 242
239 243 def clean(repo, node, wlock=None, show_stats=True):
240 244 """forcibly switch the working directory to node, clobbering changes"""
241 245 stats = _merge.update(repo, node, False, True, None, wlock)
242 246 if show_stats: _showstats(repo, stats)
243 247 return stats[3]
244 248
245 249 def merge(repo, node, force=None, remind=True, wlock=None):
246 250 """branch merge with node, resolving changes"""
247 251 stats = _merge.update(repo, node, True, force, False, wlock)
248 252 _showstats(repo, stats)
249 253 if stats[3]:
250 254 pl = repo.parents()
251 255 repo.ui.status(_("There are unresolved merges,"
252 256 " you can redo the full merge using:\n"
253 257 " hg update -C %s\n"
254 258 " hg merge %s\n")
255 259 % (pl[0].rev(), pl[1].rev()))
256 260 elif remind:
257 261 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
258 262 return stats[3]
259 263
260 264 def revert(repo, node, choose, wlock):
261 265 """revert changes to revision in node without updating dirstate"""
262 266 return _merge.update(repo, node, False, True, choose, wlock)[3]
263 267
264 268 def verify(repo):
265 269 """verify the consistency of a repository"""
266 270 return _verify.verify(repo)
mercurial/hgweb/common.py
@@ -1,61 +1,63 @@
1 1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, mimetypes
10 10 import os.path
11 11
12 12 def get_mtime(repo_path):
13 hg_path = os.path.join(repo_path, ".hg")
14 cl_path = os.path.join(hg_path, "00changelog.i")
15 if os.path.exists(os.path.join(cl_path)):
13 store_path = os.path.join(repo_path, ".hg")
14 if not os.path.isdir(os.path.join(store_path, "data")):
15 store_path = os.path.join(store_path, "store")
16 cl_path = os.path.join(store_path, "00changelog.i")
17 if os.path.exists(cl_path):
16 18 return os.stat(cl_path).st_mtime
17 19 else:
18 return os.stat(hg_path).st_mtime
20 return os.stat(store_path).st_mtime
19 21
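The probe above is the compatibility shim for this commit's layout change: pre-store repositories keep their revlogs directly under .hg (so .hg/data is a directory there), while new repositories move them under .hg/store. Restated on its own, under the same two-layout assumption:

    import os

    def store_dir(repo_path):
        # old layout: revlogs live in .hg itself; 'store' layout: .hg/store
        path = os.path.join(repo_path, ".hg")
        if not os.path.isdir(os.path.join(path, "data")):
            path = os.path.join(path, "store")
        return path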
20 22 def staticfile(directory, fname, req):
21 23 """return a file inside directory with guessed content-type header
22 24
23 25 fname always uses '/' as directory separator and isn't allowed to
24 26 contain unusual path components.
25 27 Content-type is guessed using the mimetypes module.
26 28 Return an empty string if fname is illegal or file not found.
27 29
28 30 """
29 31 parts = fname.split('/')
30 32 path = directory
31 33 for part in parts:
32 34 if (part in ('', os.curdir, os.pardir) or
33 35 os.sep in part or os.altsep is not None and os.altsep in part):
34 36 return ""
35 37 path = os.path.join(path, part)
36 38 try:
37 39 os.stat(path)
38 40 ct = mimetypes.guess_type(path)[0] or "text/plain"
39 41 req.header([('Content-type', ct),
40 42 ('Content-length', os.path.getsize(path))])
41 43 return file(path, 'rb').read()
42 44 except (TypeError, OSError):
43 45 # illegal fname or unreadable file
44 46 return ""
45 47
46 48 def style_map(templatepath, style):
47 49 """Return path to mapfile for a given style.
48 50
49 51 Searches mapfile in the following locations:
50 52 1. templatepath/style/map
51 53 2. templatepath/map-style
52 54 3. templatepath/map
53 55 """
54 56 locations = style and [os.path.join(style, "map"), "map-"+style] or []
55 57 locations.append("map")
56 58 for location in locations:
57 59 mapfile = os.path.join(templatepath, location)
58 60 if os.path.isfile(mapfile):
59 61 return mapfile
60 62 raise RuntimeError("No hgweb templates found in %r" % templatepath)
61 63
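To make the search order concrete: for style 'gitweb' the candidates are gitweb/map, then map-gitweb, then the bare map fallback, each joined onto templatepath. The and/or idiom above (a pre-2.5 substitute for a conditional expression) evaluates like this, POSIX paths assumed:

    import os

    style = "gitweb"
    locations = style and [os.path.join(style, "map"), "map-" + style] or []
    locations.append("map")
    print(locations)  # -> ['gitweb/map', 'map-gitweb', 'map']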
mercurial/localrepo.py
@@ -1,1947 +1,1956 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1',)
19 supported = ('revlogv1', 'store')
20 20
21 21 def __del__(self):
22 22 self.transhandle = None
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 if not path:
26 26 p = os.getcwd()
27 27 while not os.path.isdir(os.path.join(p, ".hg")):
28 28 oldp = p
29 29 p = os.path.dirname(p)
30 30 if p == oldp:
31 31 raise repo.RepoError(_("There is no Mercurial repository"
32 32 " here (.hg not found)"))
33 33 path = p
34 34
35 35 self.path = os.path.join(path, ".hg")
36 36 self.root = os.path.realpath(path)
37 37 self.origroot = path
38 38 self.opener = util.opener(self.path)
39 39 self.wopener = util.opener(self.root)
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 #if self.spath != self.path:
47 # os.mkdir(self.spath)
48 requirements = ("revlogv1",)
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
49 48 reqfile = self.opener("requires", "w")
50 49 for r in requirements:
51 50 reqfile.write("%s\n" % r)
52 51 reqfile.close()
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write('\0\0\0\2')
53 54 else:
54 55 raise repo.RepoError(_("repository %s not found") % path)
55 56 elif create:
56 57 raise repo.RepoError(_("repository %s already exists") % path)
57 58 else:
58 59 # find requirements
59 60 try:
60 61 requirements = self.opener("requires").read().splitlines()
61 62 except IOError, inst:
62 63 if inst.errno != errno.ENOENT:
63 64 raise
64 65 requirements = []
65 66 # check them
66 67 for r in requirements:
67 68 if r not in self.supported:
68 69 raise repo.RepoError(_("requirement '%s' not supported") % r)
69 70
70 71 # setup store
71 self.spath = self.path
72 self.sopener = util.opener(self.spath)
72 if "store" in requirements:
73 self.encodefn = util.encodefilename
74 self.decodefn = util.decodefilename
75 self.spath = os.path.join(self.path, "store")
76 else:
77 self.encodefn = lambda x: x
78 self.decodefn = lambda x: x
79 self.spath = self.path
80 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
73 81
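This block is the heart of the new layout: with the 'store' requirement present, every path handed to the store opener is filtered through encodefn, so tracked filenames are escaped before touching .hg/store and cannot collide on case-insensitive filesystems. The real transform lives in util.encodefilename/util.decodefilename; as an illustration of the kind of reversible escaping involved (a toy scheme, not necessarily the exact one):

    def toy_encodefilename(s):
        # escape the escape character, then fold case: '_' -> '__', 'X' -> '_x'
        out = []
        for c in s:
            if c == '_':
                out.append('__')
            elif c.isupper():
                out.append('_' + c.lower())
            else:
                out.append(c)
        return ''.join(out)

    def toy_decodefilename(s):
        out, i = [], 0
        while i < len(s):
            if s[i] == '_':
                out.append('_' if s[i + 1] == '_' else s[i + 1].upper())
                i += 2
            else:
                out.append(s[i])
                i += 1
        return ''.join(out)

    name = 'data/MixedCase_name.txt'
    assert toy_decodefilename(toy_encodefilename(name)) == name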
74 82 self.ui = ui.ui(parentui=parentui)
75 83 try:
76 84 self.ui.readconfig(self.join("hgrc"), self.root)
77 85 except IOError:
78 86 pass
79 87
80 88 v = self.ui.configrevlog()
81 89 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
82 90 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
83 91 fl = v.get('flags', None)
84 92 flags = 0
85 93 if fl != None:
86 94 for x in fl.split():
87 95 flags |= revlog.flagstr(x)
88 96 elif self.revlogv1:
89 97 flags = revlog.REVLOG_DEFAULT_FLAGS
90 98
91 99 v = self.revlogversion | flags
92 100 self.manifest = manifest.manifest(self.sopener, v)
93 101 self.changelog = changelog.changelog(self.sopener, v)
94 102
95 103 # the changelog might not have the inline index flag
96 104 # on. If the format of the changelog is the same as found in
97 105 # .hgrc, apply any flags found in the .hgrc as well.
98 106 # Otherwise, just version from the changelog
99 107 v = self.changelog.version
100 108 if v == self.revlogversion:
101 109 v |= flags
102 110 self.revlogversion = v
103 111
104 112 self.tagscache = None
105 113 self.branchcache = None
106 114 self.nodetagscache = None
107 115 self.encodepats = None
108 116 self.decodepats = None
109 117 self.transhandle = None
110 118
111 119 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
112 120
113 121 def url(self):
114 122 return 'file:' + self.root
115 123
116 124 def hook(self, name, throw=False, **args):
117 125 def callhook(hname, funcname):
118 126 '''call python hook. hook is callable object, looked up as
119 127 name in python module. if callable returns "true", hook
120 128 fails, else passes. if hook raises exception, treated as
121 129 hook failure. exception propagates if throw is "true".
122 130
123 131 reason for "true" meaning "hook failed" is so that
124 132 unmodified commands (e.g. mercurial.commands.update) can
125 133 be run as hooks without wrappers to convert return values.'''
126 134
127 135 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
128 136 d = funcname.rfind('.')
129 137 if d == -1:
130 138 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
131 139 % (hname, funcname))
132 140 modname = funcname[:d]
133 141 try:
134 142 obj = __import__(modname)
135 143 except ImportError:
136 144 try:
137 145 # extensions are loaded with hgext_ prefix
138 146 obj = __import__("hgext_%s" % modname)
139 147 except ImportError:
140 148 raise util.Abort(_('%s hook is invalid '
141 149 '(import of "%s" failed)') %
142 150 (hname, modname))
143 151 try:
144 152 for p in funcname.split('.')[1:]:
145 153 obj = getattr(obj, p)
146 154 except AttributeError, err:
147 155 raise util.Abort(_('%s hook is invalid '
148 156 '("%s" is not defined)') %
149 157 (hname, funcname))
150 158 if not callable(obj):
151 159 raise util.Abort(_('%s hook is invalid '
152 160 '("%s" is not callable)') %
153 161 (hname, funcname))
154 162 try:
155 163 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
156 164 except (KeyboardInterrupt, util.SignalInterrupt):
157 165 raise
158 166 except Exception, exc:
159 167 if isinstance(exc, util.Abort):
160 168 self.ui.warn(_('error: %s hook failed: %s\n') %
161 169 (hname, exc.args[0]))
162 170 else:
163 171 self.ui.warn(_('error: %s hook raised an exception: '
164 172 '%s\n') % (hname, exc))
165 173 if throw:
166 174 raise
167 175 self.ui.print_exc()
168 176 return True
169 177 if r:
170 178 if throw:
171 179 raise util.Abort(_('%s hook failed') % hname)
172 180 self.ui.warn(_('warning: %s hook failed\n') % hname)
173 181 return r
174 182
175 183 def runhook(name, cmd):
176 184 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
177 185 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
178 186 r = util.system(cmd, environ=env, cwd=self.root)
179 187 if r:
180 188 desc, r = util.explain_exit(r)
181 189 if throw:
182 190 raise util.Abort(_('%s hook %s') % (name, desc))
183 191 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
184 192 return r
185 193
186 194 r = False
187 195 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
188 196 if hname.split(".", 1)[0] == name and cmd]
189 197 hooks.sort()
190 198 for hname, cmd in hooks:
191 199 if cmd.startswith('python:'):
192 200 r = callhook(hname, cmd[7:].strip()) or r
193 201 else:
194 202 r = runhook(hname, cmd) or r
195 203 return r
196 204
197 205 tag_disallowed = ':\r\n'
198 206
199 207 def tag(self, name, node, message, local, user, date):
200 208 '''tag a revision with a symbolic name.
201 209
202 210 if local is True, the tag is stored in a per-repository file.
203 211 otherwise, it is stored in the .hgtags file, and a new
204 212 changeset is committed with the change.
205 213
206 214 keyword arguments:
207 215
208 216 local: whether to store tag in non-version-controlled file
209 217 (default False)
210 218
211 219 message: commit message to use if committing
212 220
213 221 user: name of user to use if committing
214 222
215 223 date: date tuple to use if committing'''
216 224
217 225 for c in self.tag_disallowed:
218 226 if c in name:
219 227 raise util.Abort(_('%r cannot be used in a tag name') % c)
220 228
221 229 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
222 230
223 231 if local:
224 232 # local tags are stored in the current charset
225 233 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
226 234 self.hook('tag', node=hex(node), tag=name, local=local)
227 235 return
228 236
229 237 for x in self.status()[:5]:
230 238 if '.hgtags' in x:
231 239 raise util.Abort(_('working copy of .hgtags is changed '
232 240 '(please commit .hgtags manually)'))
233 241
234 242 # committed tags are stored in UTF-8
235 243 line = '%s %s\n' % (hex(node), util.fromlocal(name))
236 244 self.wfile('.hgtags', 'ab').write(line)
237 245 if self.dirstate.state('.hgtags') == '?':
238 246 self.add(['.hgtags'])
239 247
240 248 self.commit(['.hgtags'], message, user, date)
241 249 self.hook('tag', node=hex(node), tag=name, local=local)
242 250
243 251 def tags(self):
244 252 '''return a mapping of tag to node'''
245 253 if not self.tagscache:
246 254 self.tagscache = {}
247 255
248 256 def parsetag(line, context):
249 257 if not line:
250 258 return
251 259 s = line.split(" ", 1)
252 260 if len(s) != 2:
253 261 self.ui.warn(_("%s: cannot parse entry\n") % context)
254 262 return
255 263 node, key = s
256 264 key = util.tolocal(key.strip()) # stored in UTF-8
257 265 try:
258 266 bin_n = bin(node)
259 267 except TypeError:
260 268 self.ui.warn(_("%s: node '%s' is not well formed\n") %
261 269 (context, node))
262 270 return
263 271 if bin_n not in self.changelog.nodemap:
264 272 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
265 273 (context, key))
266 274 return
267 275 self.tagscache[key] = bin_n
268 276
269 277 # read the tags file from each head, ending with the tip,
270 278 # and add each tag found to the map, with "newer" ones
271 279 # taking precedence
272 280 f = None
273 281 for rev, node, fnode in self._hgtagsnodes():
274 282 f = (f and f.filectx(fnode) or
275 283 self.filectx('.hgtags', fileid=fnode))
276 284 count = 0
277 285 for l in f.data().splitlines():
278 286 count += 1
279 287 parsetag(l, _("%s, line %d") % (str(f), count))
280 288
281 289 try:
282 290 f = self.opener("localtags")
283 291 count = 0
284 292 for l in f:
285 293 # localtags are stored in the local character set
286 294 # while the internal tag table is stored in UTF-8
287 295 l = util.fromlocal(l)
288 296 count += 1
289 297 parsetag(l, _("localtags, line %d") % count)
290 298 except IOError:
291 299 pass
292 300
293 301 self.tagscache['tip'] = self.changelog.tip()
294 302
295 303 return self.tagscache
296 304
297 305 def _hgtagsnodes(self):
298 306 heads = self.heads()
299 307 heads.reverse()
300 308 last = {}
301 309 ret = []
302 310 for node in heads:
303 311 c = self.changectx(node)
304 312 rev = c.rev()
305 313 try:
306 314 fnode = c.filenode('.hgtags')
307 315 except repo.LookupError:
308 316 continue
309 317 ret.append((rev, node, fnode))
310 318 if fnode in last:
311 319 ret[last[fnode]] = None
312 320 last[fnode] = len(ret) - 1
313 321 return [item for item in ret if item]
314 322
315 323 def tagslist(self):
316 324 '''return a list of tags ordered by revision'''
317 325 l = []
318 326 for t, n in self.tags().items():
319 327 try:
320 328 r = self.changelog.rev(n)
321 329 except:
322 330 r = -2 # sort to the beginning of the list if unknown
323 331 l.append((r, t, n))
324 332 l.sort()
325 333 return [(t, n) for r, t, n in l]
326 334
327 335 def nodetags(self, node):
328 336 '''return the tags associated with a node'''
329 337 if not self.nodetagscache:
330 338 self.nodetagscache = {}
331 339 for t, n in self.tags().items():
332 340 self.nodetagscache.setdefault(n, []).append(t)
333 341 return self.nodetagscache.get(node, [])
334 342
335 343 def branchtags(self):
336 344 if self.branchcache != None:
337 345 return self.branchcache
338 346
339 347 self.branchcache = {} # avoid recursion in changectx
340 348
341 349 partial, last, lrev = self._readbranchcache()
342 350
343 351 tiprev = self.changelog.count() - 1
344 352 if lrev != tiprev:
345 353 self._updatebranchcache(partial, lrev+1, tiprev+1)
346 354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
347 355
348 356 # the branch cache is stored on disk as UTF-8, but in the local
349 357 # charset internally
350 358 for k, v in partial.items():
351 359 self.branchcache[util.tolocal(k)] = v
352 360 return self.branchcache
353 361
354 362 def _readbranchcache(self):
355 363 partial = {}
356 364 try:
357 365 f = self.opener("branches.cache")
358 366 lines = f.read().split('\n')
359 367 f.close()
360 368 last, lrev = lines.pop(0).rstrip().split(" ", 1)
361 369 last, lrev = bin(last), int(lrev)
362 370 if not (lrev < self.changelog.count() and
363 371 self.changelog.node(lrev) == last): # sanity check
364 372 # invalidate the cache
365 373 raise ValueError('Invalid branch cache: unknown tip')
366 374 for l in lines:
367 375 if not l: continue
368 376 node, label = l.rstrip().split(" ", 1)
369 377 partial[label] = bin(node)
370 378 except (KeyboardInterrupt, util.SignalInterrupt):
371 379 raise
372 380 except Exception, inst:
373 381 if self.ui.debugflag:
374 382 self.ui.warn(str(inst), '\n')
375 383 partial, last, lrev = {}, nullid, nullrev
376 384 return partial, last, lrev
377 385
378 386 def _writebranchcache(self, branches, tip, tiprev):
379 387 try:
380 388 f = self.opener("branches.cache", "w")
381 389 f.write("%s %s\n" % (hex(tip), tiprev))
382 390 for label, node in branches.iteritems():
383 391 f.write("%s %s\n" % (hex(node), label))
384 392 except IOError:
385 393 pass
386 394
387 395 def _updatebranchcache(self, partial, start, end):
388 396 for r in xrange(start, end):
389 397 c = self.changectx(r)
390 398 b = c.branch()
391 399 if b:
392 400 partial[b] = c.node()
393 401
394 402 def lookup(self, key):
395 403 if key == '.':
396 404 key = self.dirstate.parents()[0]
397 405 if key == nullid:
398 406 raise repo.RepoError(_("no revision checked out"))
399 407 elif key == 'null':
400 408 return nullid
401 409 n = self.changelog._match(key)
402 410 if n:
403 411 return n
404 412 if key in self.tags():
405 413 return self.tags()[key]
406 414 if key in self.branchtags():
407 415 return self.branchtags()[key]
408 416 n = self.changelog._partialmatch(key)
409 417 if n:
410 418 return n
411 419 raise repo.RepoError(_("unknown revision '%s'") % key)
412 420
413 421 def dev(self):
414 422 return os.lstat(self.path).st_dev
415 423
416 424 def local(self):
417 425 return True
418 426
419 427 def join(self, f):
420 428 return os.path.join(self.path, f)
421 429
422 430 def sjoin(self, f):
431 f = self.encodefn(f)
423 432 return os.path.join(self.spath, f)
424 433
425 434 def wjoin(self, f):
426 435 return os.path.join(self.root, f)
427 436
428 437 def file(self, f):
429 438 if f[0] == '/':
430 439 f = f[1:]
431 440 return filelog.filelog(self.sopener, f, self.revlogversion)
432 441
433 442 def changectx(self, changeid=None):
434 443 return context.changectx(self, changeid)
435 444
436 445 def workingctx(self):
437 446 return context.workingctx(self)
438 447
439 448 def parents(self, changeid=None):
440 449 '''
441 450 get list of changectxs for parents of changeid or working directory
442 451 '''
443 452 if changeid is None:
444 453 pl = self.dirstate.parents()
445 454 else:
446 455 n = self.changelog.lookup(changeid)
447 456 pl = self.changelog.parents(n)
448 457 if pl[1] == nullid:
449 458 return [self.changectx(pl[0])]
450 459 return [self.changectx(pl[0]), self.changectx(pl[1])]
451 460
452 461 def filectx(self, path, changeid=None, fileid=None):
453 462 """changeid can be a changeset revision, node, or tag.
454 463 fileid can be a file revision or node."""
455 464 return context.filectx(self, path, changeid, fileid)
456 465
457 466 def getcwd(self):
458 467 return self.dirstate.getcwd()
459 468
460 469 def wfile(self, f, mode='r'):
461 470 return self.wopener(f, mode)
462 471
463 472 def wread(self, filename):
464 473 if self.encodepats == None:
465 474 l = []
466 475 for pat, cmd in self.ui.configitems("encode"):
467 476 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 477 l.append((mf, cmd))
469 478 self.encodepats = l
470 479
471 480 data = self.wopener(filename, 'r').read()
472 481
473 482 for mf, cmd in self.encodepats:
474 483 if mf(filename):
475 484 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
476 485 data = util.filter(data, cmd)
477 486 break
478 487
479 488 return data
480 489
481 490 def wwrite(self, filename, data, fd=None):
482 491 if self.decodepats == None:
483 492 l = []
484 493 for pat, cmd in self.ui.configitems("decode"):
485 494 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 495 l.append((mf, cmd))
487 496 self.decodepats = l
488 497
489 498 for mf, cmd in self.decodepats:
490 499 if mf(filename):
491 500 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
492 501 data = util.filter(data, cmd)
493 502 break
494 503
495 504 if fd:
496 505 return fd.write(data)
497 506 return self.wopener(filename, 'w').write(data)
498 507
499 508 def transaction(self):
500 509 tr = self.transhandle
501 510 if tr != None and tr.running():
502 511 return tr.nest()
503 512
504 513 # save dirstate for rollback
505 514 try:
506 515 ds = self.opener("dirstate").read()
507 516 except IOError:
508 517 ds = ""
509 518 self.opener("journal.dirstate", "w").write(ds)
510 519
511 520 renames = [(self.sjoin("journal"), self.sjoin("undo")),
512 521 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
513 522 tr = transaction.transaction(self.ui.warn, self.sopener,
514 523 self.sjoin("journal"),
515 524 aftertrans(renames))
516 525 self.transhandle = tr
517 526 return tr
518 527
519 528 def recover(self):
520 529 l = self.lock()
521 530 if os.path.exists(self.sjoin("journal")):
522 531 self.ui.status(_("rolling back interrupted transaction\n"))
523 532 transaction.rollback(self.sopener, self.sjoin("journal"))
524 533 self.reload()
525 534 return True
526 535 else:
527 536 self.ui.warn(_("no interrupted transaction available\n"))
528 537 return False
529 538
530 539 def rollback(self, wlock=None):
531 540 if not wlock:
532 541 wlock = self.wlock()
533 542 l = self.lock()
534 543 if os.path.exists(self.sjoin("undo")):
535 544 self.ui.status(_("rolling back last transaction\n"))
536 545 transaction.rollback(self.sopener, self.sjoin("undo"))
537 546 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
538 547 self.reload()
539 548 self.wreload()
540 549 else:
541 550 self.ui.warn(_("no rollback information available\n"))
542 551
543 552 def wreload(self):
544 553 self.dirstate.read()
545 554
546 555 def reload(self):
547 556 self.changelog.load()
548 557 self.manifest.load()
549 558 self.tagscache = None
550 559 self.nodetagscache = None
551 560
552 561 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
553 562 desc=None):
554 563 try:
555 564 l = lock.lock(lockname, 0, releasefn, desc=desc)
556 565 except lock.LockHeld, inst:
557 566 if not wait:
558 567 raise
559 568 self.ui.warn(_("waiting for lock on %s held by %r\n") %
560 569 (desc, inst.locker))
561 570 # default to 600 seconds timeout
562 571 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
563 572 releasefn, desc=desc)
564 573 if acquirefn:
565 574 acquirefn()
566 575 return l
567 576
568 577 def lock(self, wait=1):
569 578 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
570 579 desc=_('repository %s') % self.origroot)
571 580
572 581 def wlock(self, wait=1):
573 582 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
574 583 self.wreload,
575 584 desc=_('working directory of %s') % self.origroot)
576 585
577 586 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
578 587 """
579 588 commit an individual file as part of a larger transaction
580 589 """
581 590
582 591 t = self.wread(fn)
583 592 fl = self.file(fn)
584 593 fp1 = manifest1.get(fn, nullid)
585 594 fp2 = manifest2.get(fn, nullid)
586 595
587 596 meta = {}
588 597 cp = self.dirstate.copied(fn)
589 598 if cp:
590 599 meta["copy"] = cp
591 600 if not manifest2: # not a branch merge
592 601 meta["copyrev"] = hex(manifest1.get(cp, nullid))
593 602 fp2 = nullid
594 603 elif fp2 != nullid: # copied on remote side
595 604 meta["copyrev"] = hex(manifest1.get(cp, nullid))
596 605 elif fp1 != nullid: # copied on local side, reversed
597 606 meta["copyrev"] = hex(manifest2.get(cp))
598 607 fp2 = nullid
599 608 else: # directory rename
600 609 meta["copyrev"] = hex(manifest1.get(cp, nullid))
601 610 self.ui.debug(_(" %s: copy %s:%s\n") %
602 611 (fn, cp, meta["copyrev"]))
603 612 fp1 = nullid
604 613 elif fp2 != nullid:
605 614 # is one parent an ancestor of the other?
606 615 fpa = fl.ancestor(fp1, fp2)
607 616 if fpa == fp1:
608 617 fp1, fp2 = fp2, nullid
609 618 elif fpa == fp2:
610 619 fp2 = nullid
611 620
612 621 # is the file unmodified from the parent? report existing entry
613 622 if fp2 == nullid and not fl.cmp(fp1, t):
614 623 return fp1
615 624
616 625 changelist.append(fn)
617 626 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
618 627
619 628 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
620 629 if p1 is None:
621 630 p1, p2 = self.dirstate.parents()
622 631 return self.commit(files=files, text=text, user=user, date=date,
623 632 p1=p1, p2=p2, wlock=wlock)
624 633
625 634 def commit(self, files=None, text="", user=None, date=None,
626 635 match=util.always, force=False, lock=None, wlock=None,
627 636 force_editor=False, p1=None, p2=None, extra={}):
628 637
629 638 commit = []
630 639 remove = []
631 640 changed = []
632 641 use_dirstate = (p1 is None) # not rawcommit
633 642 extra = extra.copy()
634 643
635 644 if use_dirstate:
636 645 if files:
637 646 for f in files:
638 647 s = self.dirstate.state(f)
639 648 if s in 'nmai':
640 649 commit.append(f)
641 650 elif s == 'r':
642 651 remove.append(f)
643 652 else:
644 653 self.ui.warn(_("%s not tracked!\n") % f)
645 654 else:
646 655 changes = self.status(match=match)[:5]
647 656 modified, added, removed, deleted, unknown = changes
648 657 commit = modified + added
649 658 remove = removed
650 659 else:
651 660 commit = files
652 661
653 662 if use_dirstate:
654 663 p1, p2 = self.dirstate.parents()
655 664 update_dirstate = True
656 665 else:
657 666 p1, p2 = p1, p2 or nullid
658 667 update_dirstate = (self.dirstate.parents()[0] == p1)
659 668
660 669 c1 = self.changelog.read(p1)
661 670 c2 = self.changelog.read(p2)
662 671 m1 = self.manifest.read(c1[0]).copy()
663 672 m2 = self.manifest.read(c2[0])
664 673
665 674 if use_dirstate:
666 675 branchname = util.fromlocal(self.workingctx().branch())
667 676 else:
668 677 branchname = ""
669 678
670 679 if use_dirstate:
671 680 oldname = c1[5].get("branch", "") # stored in UTF-8
672 681 if not commit and not remove and not force and p2 == nullid and \
673 682 branchname == oldname:
674 683 self.ui.status(_("nothing changed\n"))
675 684 return None
676 685
677 686 xp1 = hex(p1)
678 687 if p2 == nullid: xp2 = ''
679 688 else: xp2 = hex(p2)
680 689
681 690 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
682 691
683 692 if not wlock:
684 693 wlock = self.wlock()
685 694 if not lock:
686 695 lock = self.lock()
687 696 tr = self.transaction()
688 697
689 698 # check in files
690 699 new = {}
691 700 linkrev = self.changelog.count()
692 701 commit.sort()
693 702 for f in commit:
694 703 self.ui.note(f + "\n")
695 704 try:
696 705 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
697 706 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
698 707 except IOError:
699 708 if use_dirstate:
700 709 self.ui.warn(_("trouble committing %s!\n") % f)
701 710 raise
702 711 else:
703 712 remove.append(f)
704 713
705 714 # update manifest
706 715 m1.update(new)
707 716 remove.sort()
708 717
709 718 for f in remove:
710 719 if f in m1:
711 720 del m1[f]
712 721 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
713 722
714 723 # add changeset
715 724 new = new.keys()
716 725 new.sort()
717 726
718 727 user = user or self.ui.username()
719 728 if not text or force_editor:
720 729 edittext = []
721 730 if text:
722 731 edittext.append(text)
723 732 edittext.append("")
724 733 edittext.append("HG: user: %s" % user)
725 734 if p2 != nullid:
726 735 edittext.append("HG: branch merge")
727 736 edittext.extend(["HG: changed %s" % f for f in changed])
728 737 edittext.extend(["HG: removed %s" % f for f in remove])
729 738 if not changed and not remove:
730 739 edittext.append("HG: no files changed")
731 740 edittext.append("")
732 741 # run editor in the repository root
733 742 olddir = os.getcwd()
734 743 os.chdir(self.root)
735 744 text = self.ui.edit("\n".join(edittext), user)
736 745 os.chdir(olddir)
737 746
738 747 lines = [line.rstrip() for line in text.rstrip().splitlines()]
739 748 while lines and not lines[0]:
740 749 del lines[0]
741 750 if not lines:
742 751 return None
743 752 text = '\n'.join(lines)
744 753 if branchname:
745 754 extra["branch"] = branchname
746 755 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
747 756 user, date, extra)
748 757 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
749 758 parent2=xp2)
750 759 tr.close()
751 760
752 761 if use_dirstate or update_dirstate:
753 762 self.dirstate.setparents(n)
754 763 if use_dirstate:
755 764 self.dirstate.update(new, "n")
756 765 self.dirstate.forget(remove)
757 766
758 767 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
759 768 return n
760 769
761 770 def walk(self, node=None, files=[], match=util.always, badmatch=None):
762 771 '''
763 772 walk recursively through the directory tree or a given
764 773 changeset, finding all files matched by the match
765 774 function
766 775
767 776 results are yielded in a tuple (src, filename), where src
768 777 is one of:
769 778 'f' the file was found in the directory tree
770 779 'm' the file was only in the dirstate and not in the tree
771 780 'b' file was not found and matched badmatch
772 781 '''
773 782
774 783 if node:
775 784 fdict = dict.fromkeys(files)
776 785 for fn in self.manifest.read(self.changelog.read(node)[0]):
777 786 for ffn in fdict:
778 787 # match if the file is the exact name or a directory
779 788 if ffn == fn or fn.startswith("%s/" % ffn):
780 789 del fdict[ffn]
781 790 break
782 791 if match(fn):
783 792 yield 'm', fn
784 793 for fn in fdict:
785 794 if badmatch and badmatch(fn):
786 795 if match(fn):
787 796 yield 'b', fn
788 797 else:
789 798 self.ui.warn(_('%s: No such file in rev %s\n') % (
790 799 util.pathto(self.getcwd(), fn), short(node)))
791 800 else:
792 801 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
793 802 yield src, fn
794 803
795 804 def status(self, node1=None, node2=None, files=[], match=util.always,
796 805 wlock=None, list_ignored=False, list_clean=False):
797 806 """return status of files between two nodes or node and working directory
798 807
799 808 If node1 is None, use the first dirstate parent instead.
800 809 If node2 is None, compare node1 with working directory.
801 810 """
802 811
803 812 def fcmp(fn, mf):
804 813 t1 = self.wread(fn)
805 814 return self.file(fn).cmp(mf.get(fn, nullid), t1)
806 815
807 816 def mfmatches(node):
808 817 change = self.changelog.read(node)
809 818 mf = self.manifest.read(change[0]).copy()
810 819 for fn in mf.keys():
811 820 if not match(fn):
812 821 del mf[fn]
813 822 return mf
814 823
815 824 modified, added, removed, deleted, unknown = [], [], [], [], []
816 825 ignored, clean = [], []
817 826
818 827 compareworking = False
819 828 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
820 829 compareworking = True
821 830
822 831 if not compareworking:
823 832 # read the manifest from node1 before the manifest from node2,
824 833 # so that we'll hit the manifest cache if we're going through
825 834 # all the revisions in parent->child order.
826 835 mf1 = mfmatches(node1)
827 836
828 837 # are we comparing the working directory?
829 838 if not node2:
830 839 if not wlock:
831 840 try:
832 841 wlock = self.wlock(wait=0)
833 842 except lock.LockException:
834 843 wlock = None
835 844 (lookup, modified, added, removed, deleted, unknown,
836 845 ignored, clean) = self.dirstate.status(files, match,
837 846 list_ignored, list_clean)
838 847
839 848 # are we comparing working dir against its parent?
840 849 if compareworking:
841 850 if lookup:
842 851 # do a full compare of any files that might have changed
843 852 mf2 = mfmatches(self.dirstate.parents()[0])
844 853 for f in lookup:
845 854 if fcmp(f, mf2):
846 855 modified.append(f)
847 856 else:
848 857 clean.append(f)
849 858 if wlock is not None:
850 859 self.dirstate.update([f], "n")
851 860 else:
852 861 # we are comparing working dir against non-parent
853 862 # generate a pseudo-manifest for the working dir
854 863 # XXX: create it in dirstate.py ?
855 864 mf2 = mfmatches(self.dirstate.parents()[0])
856 865 for f in lookup + modified + added:
857 866 mf2[f] = ""
858 867 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
859 868 for f in removed:
860 869 if f in mf2:
861 870 del mf2[f]
862 871 else:
863 872 # we are comparing two revisions
864 873 mf2 = mfmatches(node2)
865 874
866 875 if not compareworking:
867 876 # flush lists from dirstate before comparing manifests
868 877 modified, added, clean = [], [], []
869 878
870 879 # make sure to sort the files so we talk to the disk in a
871 880 # reasonable order
872 881 mf2keys = mf2.keys()
873 882 mf2keys.sort()
874 883 for fn in mf2keys:
875 884 if mf1.has_key(fn):
876 885 if mf1.flags(fn) != mf2.flags(fn) or \
877 886 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
878 887 modified.append(fn)
879 888 elif list_clean:
880 889 clean.append(fn)
881 890 del mf1[fn]
882 891 else:
883 892 added.append(fn)
884 893
885 894 removed = mf1.keys()
886 895
887 896 # sort and return results:
888 897 for l in modified, added, removed, deleted, unknown, ignored, clean:
889 898 l.sort()
890 899 return (modified, added, removed, deleted, unknown, ignored, clean)
891 900
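Callers unpack the seven result lists positionally; note that ignored and clean stay empty unless list_ignored/list_clean were requested. A usage sketch against a repository object:

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(list_clean=True)
    for f in modified:
        print('M', f)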
892 901 def add(self, list, wlock=None):
893 902 if not wlock:
894 903 wlock = self.wlock()
895 904 for f in list:
896 905 p = self.wjoin(f)
897 906 if not os.path.exists(p):
898 907 self.ui.warn(_("%s does not exist!\n") % f)
899 908 elif not os.path.isfile(p):
900 909 self.ui.warn(_("%s not added: only files supported currently\n")
901 910 % f)
902 911 elif self.dirstate.state(f) in 'an':
903 912 self.ui.warn(_("%s already tracked!\n") % f)
904 913 else:
905 914 self.dirstate.update([f], "a")
906 915
907 916 def forget(self, list, wlock=None):
908 917 if not wlock:
909 918 wlock = self.wlock()
910 919 for f in list:
911 920 if self.dirstate.state(f) not in 'ai':
912 921 self.ui.warn(_("%s not added!\n") % f)
913 922 else:
914 923 self.dirstate.forget([f])
915 924
916 925 def remove(self, list, unlink=False, wlock=None):
917 926 if unlink:
918 927 for f in list:
919 928 try:
920 929 util.unlink(self.wjoin(f))
921 930 except OSError, inst:
922 931 if inst.errno != errno.ENOENT:
923 932 raise
924 933 if not wlock:
925 934 wlock = self.wlock()
926 935 for f in list:
927 936 p = self.wjoin(f)
928 937 if os.path.exists(p):
929 938 self.ui.warn(_("%s still exists!\n") % f)
930 939 elif self.dirstate.state(f) == 'a':
931 940 self.dirstate.forget([f])
932 941 elif f not in self.dirstate:
933 942 self.ui.warn(_("%s not tracked!\n") % f)
934 943 else:
935 944 self.dirstate.update([f], "r")
936 945
937 946 def undelete(self, list, wlock=None):
938 947 p = self.dirstate.parents()[0]
939 948 mn = self.changelog.read(p)[0]
940 949 m = self.manifest.read(mn)
941 950 if not wlock:
942 951 wlock = self.wlock()
943 952 for f in list:
944 953 if self.dirstate.state(f) not in "r":
945 954 self.ui.warn("%s not removed!\n" % f)
946 955 else:
947 956 t = self.file(f).read(m[f])
948 957 self.wwrite(f, t)
949 958 util.set_exec(self.wjoin(f), m.execf(f))
950 959 self.dirstate.update([f], "n")
951 960
952 961 def copy(self, source, dest, wlock=None):
953 962 p = self.wjoin(dest)
954 963 if not os.path.exists(p):
955 964 self.ui.warn(_("%s does not exist!\n") % dest)
956 965 elif not os.path.isfile(p):
957 966 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
958 967 else:
959 968 if not wlock:
960 969 wlock = self.wlock()
961 970 if self.dirstate.state(dest) == '?':
962 971 self.dirstate.update([dest], "a")
963 972 self.dirstate.copy(source, dest)
964 973
965 974 def heads(self, start=None):
966 975 heads = self.changelog.heads(start)
967 976 # sort the output in rev descending order
968 977 heads = [(-self.changelog.rev(h), h) for h in heads]
969 978 heads.sort()
970 979 return [n for (r, n) in heads]
971 980
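An aside on the sort above: the (-rev, node) decorate/sort/undecorate idiom targets Python versions predating reliance on list.sort(key=...); on a modern interpreter the same descending-by-revision order is simply:

    heads.sort(key=self.changelog.rev, reverse=True)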
972 981 # branchlookup returns a dict giving a list of branches for
973 982 # each head. A branch is defined as the tag of a node or
974 983 # the branch of the node's parents. If a node has multiple
975 984 # branch tags, tags are eliminated if they are visible from other
976 985 # branch tags.
977 986 #
978 987 # So, for this graph: a->b->c->d->e
979 988 # \ /
980 989 # aa -----/
981 990 # a has tag 2.6.12
982 991 # d has tag 2.6.13
983 992 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
984 993 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
985 994 # from the list.
986 995 #
987 996 # It is possible that more than one head will have the same branch tag.
988 997 # callers need to check the result for multiple heads under the same
989 998 # branch tag if that is a problem for them (ie checkout of a specific
990 999 # branch).
991 1000 #
992 1001 # passing in a specific branch will limit the depth of the search
993 1002 # through the parents. It won't limit the branches returned in the
994 1003 # result though.
995 1004 def branchlookup(self, heads=None, branch=None):
996 1005 if not heads:
997 1006 heads = self.heads()
998 1007 headt = [ h for h in heads ]
999 1008 chlog = self.changelog
1000 1009 branches = {}
1001 1010 merges = []
1002 1011 seenmerge = {}
1003 1012
1004 1013 # traverse the tree once for each head, recording in the branches
1005 1014 # dict which tags are visible from this head. The branches
1006 1015 # dict also records which tags are visible from each tag
1007 1016 # while we traverse.
1008 1017 while headt or merges:
1009 1018 if merges:
1010 1019 n, found = merges.pop()
1011 1020 visit = [n]
1012 1021 else:
1013 1022 h = headt.pop()
1014 1023 visit = [h]
1015 1024 found = [h]
1016 1025 seen = {}
1017 1026 while visit:
1018 1027 n = visit.pop()
1019 1028 if n in seen:
1020 1029 continue
1021 1030 pp = chlog.parents(n)
1022 1031 tags = self.nodetags(n)
1023 1032 if tags:
1024 1033 for x in tags:
1025 1034 if x == 'tip':
1026 1035 continue
1027 1036 for f in found:
1028 1037 branches.setdefault(f, {})[n] = 1
1029 1038 branches.setdefault(n, {})[n] = 1
1030 1039 break
1031 1040 if n not in found:
1032 1041 found.append(n)
1033 1042 if branch in tags:
1034 1043 continue
1035 1044 seen[n] = 1
1036 1045 if pp[1] != nullid and n not in seenmerge:
1037 1046 merges.append((pp[1], [x for x in found]))
1038 1047 seenmerge[n] = 1
1039 1048 if pp[0] != nullid:
1040 1049 visit.append(pp[0])
1041 1050 # traverse the branches dict, eliminating branch tags from each
1042 1051 # head that are visible from another branch tag for that head.
1043 1052 out = {}
1044 1053 viscache = {}
1045 1054 for h in heads:
1046 1055 def visible(node):
1047 1056 if node in viscache:
1048 1057 return viscache[node]
1049 1058 ret = {}
1050 1059 visit = [node]
1051 1060 while visit:
1052 1061 x = visit.pop()
1053 1062 if x in viscache:
1054 1063 ret.update(viscache[x])
1055 1064 elif x not in ret:
1056 1065 ret[x] = 1
1057 1066 if x in branches:
1058 1067 visit[len(visit):] = branches[x].keys()
1059 1068 viscache[node] = ret
1060 1069 return ret
1061 1070 if h not in branches:
1062 1071 continue
1063 1072 # O(n^2), but somewhat limited. This only searches the
1064 1073 # tags visible from a specific head, not all the tags in the
1065 1074 # whole repo.
1066 1075 for b in branches[h]:
1067 1076 vis = False
1068 1077 for bb in branches[h].keys():
1069 1078 if b != bb:
1070 1079 if b in visible(bb):
1071 1080 vis = True
1072 1081 break
1073 1082 if not vis:
1074 1083 l = out.setdefault(h, [])
1075 1084 l[len(l):] = self.nodetags(b)
1076 1085 return out
1077 1086
1078 1087 def branches(self, nodes):
1079 1088 if not nodes:
1080 1089 nodes = [self.changelog.tip()]
1081 1090 b = []
1082 1091 for n in nodes:
1083 1092 t = n
1084 1093 while 1:
1085 1094 p = self.changelog.parents(n)
1086 1095 if p[1] != nullid or p[0] == nullid:
1087 1096 b.append((t, n, p[0], p[1]))
1088 1097 break
1089 1098 n = p[0]
1090 1099 return b
1091 1100
1092 1101 def between(self, pairs):
1093 1102 r = []
1094 1103
1095 1104 for top, bottom in pairs:
1096 1105 n, l, i = top, [], 0
1097 1106 f = 1
1098 1107
1099 1108 while n != bottom:
1100 1109 p = self.changelog.parents(n)[0]
1101 1110 if i == f:
1102 1111 l.append(n)
1103 1112 f = f * 2
1104 1113 n = p
1105 1114 i += 1
1106 1115
1107 1116 r.append(l)
1108 1117
1109 1118 return r
1110 1119
1111 1120 def findincoming(self, remote, base=None, heads=None, force=False):
1112 1121 """Return list of roots of the subsets of missing nodes from remote
1113 1122
1114 1123 If base dict is specified, assume that these nodes and their parents
1115 1124 exist on the remote side and that no child of a node of base exists
1116 1125 in both remote and self.
1117 1126 Furthermore base will be updated to include the nodes that exist
1118 1127 in both self and remote but whose children do not exist in both.
1119 1128 If a list of heads is specified, return only nodes which are heads
1120 1129 or ancestors of these heads.
1121 1130
1122 1131 All the ancestors of base are in self and in remote.
1123 1132 All the descendants of the list returned are missing in self.
1124 1133 (and so we know that the rest of the nodes are missing in remote, see
1125 1134 outgoing)
1126 1135 """
1127 1136 m = self.changelog.nodemap
1128 1137 search = []
1129 1138 fetch = {}
1130 1139 seen = {}
1131 1140 seenbranch = {}
1132 1141 if base == None:
1133 1142 base = {}
1134 1143
1135 1144 if not heads:
1136 1145 heads = remote.heads()
1137 1146
1138 1147 if self.changelog.tip() == nullid:
1139 1148 base[nullid] = 1
1140 1149 if heads != [nullid]:
1141 1150 return [nullid]
1142 1151 return []
1143 1152
1144 1153 # assume we're closer to the tip than the root
1145 1154 # and start by examining the heads
1146 1155 self.ui.status(_("searching for changes\n"))
1147 1156
1148 1157 unknown = []
1149 1158 for h in heads:
1150 1159 if h not in m:
1151 1160 unknown.append(h)
1152 1161 else:
1153 1162 base[h] = 1
1154 1163
1155 1164 if not unknown:
1156 1165 return []
1157 1166
1158 1167 req = dict.fromkeys(unknown)
1159 1168 reqcnt = 0
1160 1169
1161 1170 # search through remote branches
1162 1171 # a 'branch' here is a linear segment of history, with four parts:
1163 1172 # head, root, first parent, second parent
1164 1173 # (a branch always has two parents (or none) by definition)
1165 1174 unknown = remote.branches(unknown)
1166 1175 while unknown:
1167 1176 r = []
1168 1177 while unknown:
1169 1178 n = unknown.pop(0)
1170 1179 if n[0] in seen:
1171 1180 continue
1172 1181
1173 1182 self.ui.debug(_("examining %s:%s\n")
1174 1183 % (short(n[0]), short(n[1])))
1175 1184 if n[0] == nullid: # found the end of the branch
1176 1185 pass
1177 1186 elif n in seenbranch:
1178 1187 self.ui.debug(_("branch already found\n"))
1179 1188 continue
1180 1189 elif n[1] and n[1] in m: # do we know the base?
1181 1190 self.ui.debug(_("found incomplete branch %s:%s\n")
1182 1191 % (short(n[0]), short(n[1])))
1183 1192 search.append(n) # schedule branch range for scanning
1184 1193 seenbranch[n] = 1
1185 1194 else:
1186 1195 if n[1] not in seen and n[1] not in fetch:
1187 1196 if n[2] in m and n[3] in m:
1188 1197 self.ui.debug(_("found new changeset %s\n") %
1189 1198 short(n[1]))
1190 1199 fetch[n[1]] = 1 # earliest unknown
1191 1200 for p in n[2:4]:
1192 1201 if p in m:
1193 1202 base[p] = 1 # latest known
1194 1203
1195 1204 for p in n[2:4]:
1196 1205 if p not in req and p not in m:
1197 1206 r.append(p)
1198 1207 req[p] = 1
1199 1208 seen[n[0]] = 1
1200 1209
1201 1210 if r:
1202 1211 reqcnt += 1
1203 1212 self.ui.debug(_("request %d: %s\n") %
1204 1213 (reqcnt, " ".join(map(short, r))))
1205 1214 for p in xrange(0, len(r), 10):
1206 1215 for b in remote.branches(r[p:p+10]):
1207 1216 self.ui.debug(_("received %s:%s\n") %
1208 1217 (short(b[0]), short(b[1])))
1209 1218 unknown.append(b)
1210 1219
1211 1220 # do binary search on the branches we found
1212 1221 while search:
1213 1222 n = search.pop(0)
1214 1223 reqcnt += 1
1215 1224 l = remote.between([(n[0], n[1])])[0]
1216 1225 l.append(n[1])
1217 1226 p = n[0]
1218 1227 f = 1
1219 1228 for i in l:
1220 1229 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1221 1230 if i in m:
1222 1231 if f <= 2:
1223 1232 self.ui.debug(_("found new branch changeset %s\n") %
1224 1233 short(p))
1225 1234 fetch[p] = 1
1226 1235 base[i] = 1
1227 1236 else:
1228 1237 self.ui.debug(_("narrowed branch search to %s:%s\n")
1229 1238 % (short(p), short(i)))
1230 1239 search.append((p, i))
1231 1240 break
1232 1241 p, f = i, f * 2
1233 1242
1234 1243 # sanity check our fetch list
1235 1244 for f in fetch.keys():
1236 1245 if f in m:
1237 1246 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1238 1247
1239 1248 if base.keys() == [nullid]:
1240 1249 if force:
1241 1250 self.ui.warn(_("warning: repository is unrelated\n"))
1242 1251 else:
1243 1252 raise util.Abort(_("repository is unrelated"))
1244 1253
1245 1254 self.ui.debug(_("found new changesets starting at ") +
1246 1255 " ".join([short(f) for f in fetch]) + "\n")
1247 1256
1248 1257 self.ui.debug(_("%d total queries\n") % reqcnt)
1249 1258
1250 1259 return fetch.keys()
1251 1260
1252 1261 def findoutgoing(self, remote, base=None, heads=None, force=False):
1253 1262 """Return list of nodes that are roots of subsets not in remote
1254 1263
1255 1264 If base dict is specified, assume that these nodes and their parents
1256 1265 exist on the remote side.
1257 1266 If a list of heads is specified, return only nodes which are heads
1258 1267 or ancestors of these heads, and return a second element which
1259 1268 contains all remote heads which get new children.
1260 1269 """
1261 1270 if base == None:
1262 1271 base = {}
1263 1272 self.findincoming(remote, base, heads, force=force)
1264 1273
1265 1274 self.ui.debug(_("common changesets up to ")
1266 1275 + " ".join(map(short, base.keys())) + "\n")
1267 1276
1268 1277 remain = dict.fromkeys(self.changelog.nodemap)
1269 1278
1270 1279 # prune everything remote has from the tree
1271 1280 del remain[nullid]
1272 1281 remove = base.keys()
1273 1282 while remove:
1274 1283 n = remove.pop(0)
1275 1284 if n in remain:
1276 1285 del remain[n]
1277 1286 for p in self.changelog.parents(n):
1278 1287 remove.append(p)
1279 1288
1280 1289 # find every node whose parents have been pruned
1281 1290 subset = []
1282 1291 # find every remote head that will get new children
1283 1292 updated_heads = {}
1284 1293 for n in remain:
1285 1294 p1, p2 = self.changelog.parents(n)
1286 1295 if p1 not in remain and p2 not in remain:
1287 1296 subset.append(n)
1288 1297 if heads:
1289 1298 if p1 in heads:
1290 1299 updated_heads[p1] = True
1291 1300 if p2 in heads:
1292 1301 updated_heads[p2] = True
1293 1302
1294 1303 # this is the set of all roots we have to push
1295 1304 if heads:
1296 1305 return subset, updated_heads.keys()
1297 1306 else:
1298 1307 return subset
1299 1308
1300 1309 def pull(self, remote, heads=None, force=False, lock=None):
1301 1310 mylock = False
1302 1311 if not lock:
1303 1312 lock = self.lock()
1304 1313 mylock = True
1305 1314
1306 1315 try:
1307 1316 fetch = self.findincoming(remote, force=force)
1308 1317 if fetch == [nullid]:
1309 1318 self.ui.status(_("requesting all changes\n"))
1310 1319
1311 1320 if not fetch:
1312 1321 self.ui.status(_("no changes found\n"))
1313 1322 return 0
1314 1323
1315 1324 if heads is None:
1316 1325 cg = remote.changegroup(fetch, 'pull')
1317 1326 else:
1318 1327 if 'changegroupsubset' not in remote.capabilities:
1319 1328 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1320 1329 cg = remote.changegroupsubset(fetch, heads, 'pull')
1321 1330 return self.addchangegroup(cg, 'pull', remote.url())
1322 1331 finally:
1323 1332 if mylock:
1324 1333 lock.release()
1325 1334
1326 1335 def push(self, remote, force=False, revs=None):
1327 1336 # there are two ways to push to remote repo:
1328 1337 #
1329 1338 # addchangegroup assumes local user can lock remote
1330 1339 # repo (local filesystem, old ssh servers).
1331 1340 #
1332 1341 # unbundle assumes local user cannot lock remote repo (new ssh
1333 1342 # servers, http servers).
1334 1343
1335 1344 if remote.capable('unbundle'):
1336 1345 return self.push_unbundle(remote, force, revs)
1337 1346 return self.push_addchangegroup(remote, force, revs)
1338 1347
1339 1348 def prepush(self, remote, force, revs):
1340 1349 base = {}
1341 1350 remote_heads = remote.heads()
1342 1351 inc = self.findincoming(remote, base, remote_heads, force=force)
1343 1352
1344 1353 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1345 1354 if revs is not None:
1346 1355 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1347 1356 else:
1348 1357 bases, heads = update, self.changelog.heads()
1349 1358
1350 1359 if not bases:
1351 1360 self.ui.status(_("no changes found\n"))
1352 1361 return None, 1
1353 1362 elif not force:
1354 1363 # check if we're creating new remote heads
1355 1364 # to be a remote head after push, node must be either
1356 1365 # - unknown locally
1357 1366 # - a local outgoing head descended from update
1358 1367 # - a remote head that's known locally and not
1359 1368 # ancestral to an outgoing head
1360 1369
1361 1370 warn = 0
1362 1371
1363 1372 if remote_heads == [nullid]:
1364 1373 warn = 0
1365 1374 elif not revs and len(heads) > len(remote_heads):
1366 1375 warn = 1
1367 1376 else:
1368 1377 newheads = list(heads)
1369 1378 for r in remote_heads:
1370 1379 if r in self.changelog.nodemap:
1371 1380 desc = self.changelog.heads(r)
1372 1381 l = [h for h in heads if h in desc]
1373 1382 if not l:
1374 1383 newheads.append(r)
1375 1384 else:
1376 1385 newheads.append(r)
1377 1386 if len(newheads) > len(remote_heads):
1378 1387 warn = 1
1379 1388
1380 1389 if warn:
1381 1390 self.ui.warn(_("abort: push creates new remote heads!\n"))
1382 1391 self.ui.status(_("(did you forget to merge?"
1383 1392 " use push -f to force)\n"))
1384 1393 return None, 1
1385 1394 elif inc:
1386 1395 self.ui.warn(_("note: unsynced remote changes!\n"))
1387 1396
1388 1397
1389 1398 if revs is None:
1390 1399 cg = self.changegroup(update, 'push')
1391 1400 else:
1392 1401 cg = self.changegroupsubset(update, revs, 'push')
1393 1402 return cg, remote_heads
1394 1403
1395 1404 def push_addchangegroup(self, remote, force, revs):
1396 1405 lock = remote.lock()
1397 1406
1398 1407 ret = self.prepush(remote, force, revs)
1399 1408 if ret[0] is not None:
1400 1409 cg, remote_heads = ret
1401 1410 return remote.addchangegroup(cg, 'push', self.url())
1402 1411 return ret[1]
1403 1412
1404 1413 def push_unbundle(self, remote, force, revs):
1405 1414 # local repo finds heads on server, finds out what revs it
1406 1415 # must push. once revs transferred, if server finds it has
1407 1416 # different heads (someone else won commit/push race), server
1408 1417 # aborts.
1409 1418
1410 1419 ret = self.prepush(remote, force, revs)
1411 1420 if ret[0] is not None:
1412 1421 cg, remote_heads = ret
1413 1422 if force: remote_heads = ['force']
1414 1423 return remote.unbundle(cg, remote_heads, 'push')
1415 1424 return ret[1]
1416 1425
1417 1426 def changegroupinfo(self, nodes):
1418 1427 self.ui.note(_("%d changesets found\n") % len(nodes))
1419 1428 if self.ui.debugflag:
1420 1429 self.ui.debug(_("List of changesets:\n"))
1421 1430 for node in nodes:
1422 1431 self.ui.debug("%s\n" % hex(node))
1423 1432
1424 1433 def changegroupsubset(self, bases, heads, source):
1425 1434 """This function generates a changegroup consisting of all the nodes
1426 1435 that are descendants of any of the bases, and ancestors of any of
1427 1436 the heads.
1428 1437
1429 1438 It is fairly complex as determining which filenodes and which
1430 1439 manifest nodes need to be included for the changeset to be complete
1431 1440 is non-trivial.
1432 1441
1433 1442 Another wrinkle is doing the reverse, figuring out which changeset in
1434 1443 the changegroup a particular filenode or manifestnode belongs to."""
1435 1444
1436 1445 self.hook('preoutgoing', throw=True, source=source)
1437 1446
1438 1447 # Set up some initial variables
1439 1448 # Make it easy to refer to self.changelog
1440 1449 cl = self.changelog
1441 1450 # msng is short for missing - compute the list of changesets in this
1442 1451 # changegroup.
1443 1452 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1444 1453 self.changegroupinfo(msng_cl_lst)
1445 1454 # Some bases may turn out to be superfluous, and some heads may be
1446 1455 # too. nodesbetween will return the minimal set of bases and heads
1447 1456 # necessary to re-create the changegroup.
1448 1457
1449 1458 # Known heads are the list of heads that it is assumed the recipient
1450 1459 # of this changegroup will know about.
1451 1460 knownheads = {}
1452 1461 # We assume that all parents of bases are known heads.
1453 1462 for n in bases:
1454 1463 for p in cl.parents(n):
1455 1464 if p != nullid:
1456 1465 knownheads[p] = 1
1457 1466 knownheads = knownheads.keys()
1458 1467 if knownheads:
1459 1468 # Now that we know what heads are known, we can compute which
1460 1469 # changesets are known. The recipient must know about all
1461 1470 # changesets required to reach the known heads from the null
1462 1471 # changeset.
1463 1472 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1464 1473 junk = None
1465 1474 # Transform the list into an ersatz set.
1466 1475 has_cl_set = dict.fromkeys(has_cl_set)
1467 1476 else:
1468 1477 # If there were no known heads, the recipient cannot be assumed to
1469 1478 # know about any changesets.
1470 1479 has_cl_set = {}
1471 1480
1472 1481 # Make it easy to refer to self.manifest
1473 1482 mnfst = self.manifest
1474 1483 # We don't know which manifests are missing yet
1475 1484 msng_mnfst_set = {}
1476 1485 # Nor do we know which filenodes are missing.
1477 1486 msng_filenode_set = {}
1478 1487
1479 1488 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1480 1489 junk = None
1481 1490
1482 1491 # A changeset always belongs to itself, so the changenode lookup
1483 1492 # function for a changenode is identity.
1484 1493 def identity(x):
1485 1494 return x
1486 1495
1487 1496 # A function generating function. Sets up an environment for the
1488 1497 # inner function.
1489 1498 def cmp_by_rev_func(revlog):
1490 1499 # Compare two nodes by their revision number in the environment's
1491 1500 # revision history. Since the revision number both represents the
1492 1501 # most efficient order to read the nodes in, and represents a
1493 1502 # topological sorting of the nodes, this function is often useful.
1494 1503 def cmp_by_rev(a, b):
1495 1504 return cmp(revlog.rev(a), revlog.rev(b))
1496 1505 return cmp_by_rev
1497 1506
1498 1507 # If we determine that a particular file or manifest node must be a
1499 1508 # node that the recipient of the changegroup will already have, we can
1500 1509 # also assume the recipient will have all the parents. This function
1501 1510 # prunes them from the set of missing nodes.
1502 1511 def prune_parents(revlog, hasset, msngset):
1503 1512 haslst = hasset.keys()
1504 1513 haslst.sort(cmp_by_rev_func(revlog))
1505 1514 for node in haslst:
1506 1515 parentlst = [p for p in revlog.parents(node) if p != nullid]
1507 1516 while parentlst:
1508 1517 n = parentlst.pop()
1509 1518 if n not in hasset:
1510 1519 hasset[n] = 1
1511 1520 p = [p for p in revlog.parents(n) if p != nullid]
1512 1521 parentlst.extend(p)
1513 1522 for n in hasset:
1514 1523 msngset.pop(n, None)
1515 1524
1516 1525 # This is a function generating function used to set up an environment
1517 1526 # for the inner function to execute in.
1518 1527 def manifest_and_file_collector(changedfileset):
1519 1528 # This is an information gathering function that gathers
1520 1529 # information from each changeset node that goes out as part of
1521 1530 # the changegroup. The information gathered is a list of which
1522 1531 # manifest nodes are potentially required (the recipient may
1523 1532 # already have them) and total list of all files which were
1524 1533 # changed in any changeset in the changegroup.
1525 1534 #
1526 1535 # We also remember the first changenode we saw any manifest
1527 1536 # referenced by so we can later determine which changenode 'owns'
1528 1537 # the manifest.
1529 1538 def collect_manifests_and_files(clnode):
1530 1539 c = cl.read(clnode)
1531 1540 for f in c[3]:
1532 1541 # This is to make sure we only have one instance of each
1533 1542 # filename string for each filename.
1534 1543 changedfileset.setdefault(f, f)
1535 1544 msng_mnfst_set.setdefault(c[0], clnode)
1536 1545 return collect_manifests_and_files
1537 1546
1538 1547 # Figure out which manifest nodes (of the ones we think might be part
1539 1548 # of the changegroup) the recipient must know about and remove them
1540 1549 # from the changegroup.
1541 1550 def prune_manifests():
1542 1551 has_mnfst_set = {}
1543 1552 for n in msng_mnfst_set:
1544 1553 # If a 'missing' manifest thinks it belongs to a changenode
1545 1554 # the recipient is assumed to have, obviously the recipient
1546 1555 # must have that manifest.
1547 1556 linknode = cl.node(mnfst.linkrev(n))
1548 1557 if linknode in has_cl_set:
1549 1558 has_mnfst_set[n] = 1
1550 1559 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1551 1560
1552 1561 # Use the information collected in collect_manifests_and_files to say
1553 1562 # which changenode any manifestnode belongs to.
1554 1563 def lookup_manifest_link(mnfstnode):
1555 1564 return msng_mnfst_set[mnfstnode]
1556 1565
1557 1566 # A function generating function that sets up the initial environment
1558 1567 # for the inner function.
1559 1568 def filenode_collector(changedfiles):
1560 1569 next_rev = [0]
1561 1570 # This gathers information from each manifestnode included in the
1562 1571 # changegroup about which filenodes the manifest node references
1563 1572 # so we can include those in the changegroup too.
1564 1573 #
1565 1574 # It also remembers which changenode each filenode belongs to. It
1566 1575 # does this by assuming a filenode belongs to the changenode that
1567 1576 # the first manifest referencing it belongs to.
1568 1577 def collect_msng_filenodes(mnfstnode):
1569 1578 r = mnfst.rev(mnfstnode)
1570 1579 if r == next_rev[0]:
1571 1580 # If the last rev we looked at was the one just previous,
1572 1581 # we only need to see a diff.
1573 1582 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1574 1583 # For each line in the delta
1575 1584 for dline in delta.splitlines():
1576 1585 # get the filename and filenode for that line
1577 1586 f, fnode = dline.split('\0')
1578 1587 fnode = bin(fnode[:40])
1579 1588 f = changedfiles.get(f, None)
1580 1589 # And if the file is in the list of files we care
1581 1590 # about.
1582 1591 if f is not None:
1583 1592 # Get the changenode this manifest belongs to
1584 1593 clnode = msng_mnfst_set[mnfstnode]
1585 1594 # Create the set of filenodes for the file if
1586 1595 # there isn't one already.
1587 1596 ndset = msng_filenode_set.setdefault(f, {})
1588 1597 # And set the filenode's changelog node to the
1589 1598 # manifest's if it hasn't been set already.
1590 1599 ndset.setdefault(fnode, clnode)
1591 1600 else:
1592 1601 # Otherwise we need a full manifest.
1593 1602 m = mnfst.read(mnfstnode)
1594 1603 # For every file we care about.
1595 1604 for f in changedfiles:
1596 1605 fnode = m.get(f, None)
1597 1606 # If it's in the manifest
1598 1607 if fnode is not None:
1599 1608 # See comments above.
1600 1609 clnode = msng_mnfst_set[mnfstnode]
1601 1610 ndset = msng_filenode_set.setdefault(f, {})
1602 1611 ndset.setdefault(fnode, clnode)
1603 1612 # Remember the revision we hope to see next.
1604 1613 next_rev[0] = r + 1
1605 1614 return collect_msng_filenodes
1606 1615
1607 1616 # We have a list of filenodes we think we need for a file, let's remove
1608 1617 # all those we know the recipient must have.
1609 1618 def prune_filenodes(f, filerevlog):
1610 1619 msngset = msng_filenode_set[f]
1611 1620 hasset = {}
1612 1621 # If a 'missing' filenode thinks it belongs to a changenode we
1613 1622 # assume the recipient must have, then the recipient must have
1614 1623 # that filenode.
1615 1624 for n in msngset:
1616 1625 clnode = cl.node(filerevlog.linkrev(n))
1617 1626 if clnode in has_cl_set:
1618 1627 hasset[n] = 1
1619 1628 prune_parents(filerevlog, hasset, msngset)
1620 1629
1621 1630 # A function generating function that sets up a context for the
1622 1631 # inner function.
1623 1632 def lookup_filenode_link_func(fname):
1624 1633 msngset = msng_filenode_set[fname]
1625 1634 # Lookup the changenode the filenode belongs to.
1626 1635 def lookup_filenode_link(fnode):
1627 1636 return msngset[fnode]
1628 1637 return lookup_filenode_link
1629 1638
1630 1639 # Now that we have all these utility functions to help out and
1631 1640 # logically divide up the task, generate the group.
1632 1641 def gengroup():
1633 1642 # The set of changed files starts empty.
1634 1643 changedfiles = {}
1635 1644 # Create a changenode group generator that will call our functions
1636 1645 # back to lookup the owning changenode and collect information.
1637 1646 group = cl.group(msng_cl_lst, identity,
1638 1647 manifest_and_file_collector(changedfiles))
1639 1648 for chnk in group:
1640 1649 yield chnk
1641 1650
1642 1651 # The list of manifests has been collected by the generator
1643 1652 # calling our functions back.
1644 1653 prune_manifests()
1645 1654 msng_mnfst_lst = msng_mnfst_set.keys()
1646 1655 # Sort the manifestnodes by revision number.
1647 1656 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1648 1657 # Create a generator for the manifestnodes that calls our lookup
1649 1658 # and data collection functions back.
1650 1659 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1651 1660 filenode_collector(changedfiles))
1652 1661 for chnk in group:
1653 1662 yield chnk
1654 1663
1655 1664 # These are no longer needed, dereference and toss the memory for
1656 1665 # them.
1657 1666 msng_mnfst_lst = None
1658 1667 msng_mnfst_set.clear()
1659 1668
1660 1669 changedfiles = changedfiles.keys()
1661 1670 changedfiles.sort()
1662 1671 # Go through all our files in order sorted by name.
1663 1672 for fname in changedfiles:
1664 1673 filerevlog = self.file(fname)
1665 1674 # Toss out the filenodes that the recipient isn't really
1666 1675 # missing.
1667 1676 if msng_filenode_set.has_key(fname):
1668 1677 prune_filenodes(fname, filerevlog)
1669 1678 msng_filenode_lst = msng_filenode_set[fname].keys()
1670 1679 else:
1671 1680 msng_filenode_lst = []
1672 1681 # If any filenodes are left, generate the group for them,
1673 1682 # otherwise don't bother.
1674 1683 if len(msng_filenode_lst) > 0:
1675 1684 yield changegroup.genchunk(fname)
1676 1685 # Sort the filenodes by their revision #
1677 1686 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1678 1687 # Create a group generator and only pass in a changenode
1679 1688 # lookup function, as we don't need to collect any information
1680 1689 # from filenodes.
1681 1690 group = filerevlog.group(msng_filenode_lst,
1682 1691 lookup_filenode_link_func(fname))
1683 1692 for chnk in group:
1684 1693 yield chnk
1685 1694 if msng_filenode_set.has_key(fname):
1686 1695 # Don't need this anymore, toss it to free memory.
1687 1696 del msng_filenode_set[fname]
1688 1697 # Signal that no more groups are left.
1689 1698 yield changegroup.closechunk()
1690 1699
1691 1700 if msng_cl_lst:
1692 1701 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1693 1702
1694 1703 return util.chunkbuffer(gengroup())
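# Usage sketch (hypothetical node names): stream everything between a
# known base and a head into a file-like object; the returned
# chunkbuffer exposes read().
# cg = repo.changegroupsubset([basenode], [headnode], 'push')
# while 1:
#     chunk = cg.read(4096)
#     if not chunk:
#         break
#     out.write(chunk)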
1695 1704
1696 1705 def changegroup(self, basenodes, source):
1697 1706 """Generate a changegroup of all nodes that we have that a recipient
1698 1707 doesn't.
1699 1708
1700 1709 This is much easier than the previous function as we can assume that
1701 1710 the recipient has any changenode we aren't sending them."""
1702 1711
1703 1712 self.hook('preoutgoing', throw=True, source=source)
1704 1713
1705 1714 cl = self.changelog
1706 1715 nodes = cl.nodesbetween(basenodes, None)[0]
1707 1716 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1708 1717 self.changegroupinfo(nodes)
1709 1718
1710 1719 def identity(x):
1711 1720 return x
1712 1721
1713 1722 def gennodelst(revlog):
1714 1723 for r in xrange(0, revlog.count()):
1715 1724 n = revlog.node(r)
1716 1725 if revlog.linkrev(n) in revset:
1717 1726 yield n
1718 1727
1719 1728 def changed_file_collector(changedfileset):
1720 1729 def collect_changed_files(clnode):
1721 1730 c = cl.read(clnode)
1722 1731 for fname in c[3]:
1723 1732 changedfileset[fname] = 1
1724 1733 return collect_changed_files
1725 1734
1726 1735 def lookuprevlink_func(revlog):
1727 1736 def lookuprevlink(n):
1728 1737 return cl.node(revlog.linkrev(n))
1729 1738 return lookuprevlink
1730 1739
1731 1740 def gengroup():
1732 1741 # construct a list of all changed files
1733 1742 changedfiles = {}
1734 1743
1735 1744 for chnk in cl.group(nodes, identity,
1736 1745 changed_file_collector(changedfiles)):
1737 1746 yield chnk
1738 1747 changedfiles = changedfiles.keys()
1739 1748 changedfiles.sort()
1740 1749
1741 1750 mnfst = self.manifest
1742 1751 nodeiter = gennodelst(mnfst)
1743 1752 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1744 1753 yield chnk
1745 1754
1746 1755 for fname in changedfiles:
1747 1756 filerevlog = self.file(fname)
1748 1757 nodeiter = gennodelst(filerevlog)
1749 1758 nodeiter = list(nodeiter)
1750 1759 if nodeiter:
1751 1760 yield changegroup.genchunk(fname)
1752 1761 lookup = lookuprevlink_func(filerevlog)
1753 1762 for chnk in filerevlog.group(nodeiter, lookup):
1754 1763 yield chnk
1755 1764
1756 1765 yield changegroup.closechunk()
1757 1766
1758 1767 if nodes:
1759 1768 self.hook('outgoing', node=hex(nodes[0]), source=source)
1760 1769
1761 1770 return util.chunkbuffer(gengroup())
1762 1771
1763 1772 def addchangegroup(self, source, srctype, url):
1764 1773 """add changegroup to repo.
1765 1774
1766 1775 return values:
1767 1776 - nothing changed or no source: 0
1768 1777 - more heads than before: 1+added heads (2..n)
1769 1778 - fewer heads than before: -1-removed heads (-2..-n)
1770 1779 - number of heads stays the same: 1
1771 1780 """
1772 1781 def csmap(x):
1773 1782 self.ui.debug(_("add changeset %s\n") % short(x))
1774 1783 return cl.count()
1775 1784
1776 1785 def revmap(x):
1777 1786 return cl.rev(x)
1778 1787
1779 1788 if not source:
1780 1789 return 0
1781 1790
1782 1791 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1783 1792
1784 1793 changesets = files = revisions = 0
1785 1794
1786 1795 tr = self.transaction()
1787 1796
1788 1797 # write changelog data to temp files so concurrent readers will not see
1789 1798 # an inconsistent view
1790 1799 cl = None
1791 1800 try:
1792 1801 cl = appendfile.appendchangelog(self.sopener,
1793 1802 self.changelog.version)
1794 1803
1795 1804 oldheads = len(cl.heads())
1796 1805
1797 1806 # pull off the changeset group
1798 1807 self.ui.status(_("adding changesets\n"))
1799 1808 cor = cl.count() - 1
1800 1809 chunkiter = changegroup.chunkiter(source)
1801 1810 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1802 1811 raise util.Abort(_("received changelog group is empty"))
1803 1812 cnr = cl.count() - 1
1804 1813 changesets = cnr - cor
1805 1814
1806 1815 # pull off the manifest group
1807 1816 self.ui.status(_("adding manifests\n"))
1808 1817 chunkiter = changegroup.chunkiter(source)
1809 1818 # no need to check for empty manifest group here:
1810 1819 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1811 1820 # no new manifest will be created and the manifest group will
1812 1821 # be empty during the pull
1813 1822 self.manifest.addgroup(chunkiter, revmap, tr)
1814 1823
1815 1824 # process the files
1816 1825 self.ui.status(_("adding file changes\n"))
1817 1826 while 1:
1818 1827 f = changegroup.getchunk(source)
1819 1828 if not f:
1820 1829 break
1821 1830 self.ui.debug(_("adding %s revisions\n") % f)
1822 1831 fl = self.file(f)
1823 1832 o = fl.count()
1824 1833 chunkiter = changegroup.chunkiter(source)
1825 1834 if fl.addgroup(chunkiter, revmap, tr) is None:
1826 1835 raise util.Abort(_("received file revlog group is empty"))
1827 1836 revisions += fl.count() - o
1828 1837 files += 1
1829 1838
1830 1839 cl.writedata()
1831 1840 finally:
1832 1841 if cl:
1833 1842 cl.cleanup()
1834 1843
1835 1844 # make changelog see real files again
1836 1845 self.changelog = changelog.changelog(self.sopener,
1837 1846 self.changelog.version)
1838 1847 self.changelog.checkinlinesize(tr)
1839 1848
1840 1849 newheads = len(self.changelog.heads())
1841 1850 heads = ""
1842 1851 if oldheads and newheads != oldheads:
1843 1852 heads = _(" (%+d heads)") % (newheads - oldheads)
1844 1853
1845 1854 self.ui.status(_("added %d changesets"
1846 1855 " with %d changes to %d files%s\n")
1847 1856 % (changesets, revisions, files, heads))
1848 1857
1849 1858 if changesets > 0:
1850 1859 self.hook('pretxnchangegroup', throw=True,
1851 1860 node=hex(self.changelog.node(cor+1)), source=srctype,
1852 1861 url=url)
1853 1862
1854 1863 tr.close()
1855 1864
1856 1865 if changesets > 0:
1857 1866 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1858 1867 source=srctype, url=url)
1859 1868
1860 1869 for i in xrange(cor + 1, cnr + 1):
1861 1870 self.hook("incoming", node=hex(self.changelog.node(i)),
1862 1871 source=srctype, url=url)
1863 1872
1864 1873 # never return 0 here:
1865 1874 if newheads < oldheads:
1866 1875 return newheads - oldheads - 1
1867 1876 else:
1868 1877 return newheads - oldheads + 1
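# Decoding the return value (a sketch that just restates the table in
# the docstring above):
def describe_result(ret):
    if ret == 0:
        return "nothing changed or no source"
    if ret == 1:
        return "number of heads unchanged"
    if ret > 1:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)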
1869 1878
1870 1879
1871 1880 def stream_in(self, remote):
1872 1881 fp = remote.stream_out()
1873 1882 l = fp.readline()
1874 1883 try:
1875 1884 resp = int(l)
1876 1885 except ValueError:
1877 1886 raise util.UnexpectedOutput(
1878 1887 _('Unexpected response from remote server:'), l)
1879 1888 if resp == 1:
1880 1889 raise util.Abort(_('operation forbidden by server'))
1881 1890 elif resp == 2:
1882 1891 raise util.Abort(_('locking the remote repository failed'))
1883 1892 elif resp != 0:
1884 1893 raise util.Abort(_('the server sent an unknown error code'))
1885 1894 self.ui.status(_('streaming all changes\n'))
1886 1895 l = fp.readline()
1887 1896 try:
1888 1897 total_files, total_bytes = map(int, l.split(' ', 1))
1889 1898 except (ValueError, TypeError):
1890 1899 raise util.UnexpectedOutput(
1891 1900 _('Unexpected response from remote server:'), l)
1892 1901 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 1902 (total_files, util.bytecount(total_bytes)))
1894 1903 start = time.time()
1895 1904 for i in xrange(total_files):
1896 1905 # XXX doesn't support '\n' or '\r' in filenames
1897 1906 l = fp.readline()
1898 1907 try:
1899 1908 name, size = l.split('\0', 1)
1900 1909 size = int(size)
1901 1910 except (ValueError, TypeError):
1902 1911 raise util.UnexpectedOutput(
1903 1912 _('Unexpected response from remote server:'), l)
1904 1913 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 1914 ofp = self.sopener(name, 'w')
1906 1915 for chunk in util.filechunkiter(fp, limit=size):
1907 1916 ofp.write(chunk)
1908 1917 ofp.close()
1909 1918 elapsed = time.time() - start
1910 1919 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1911 1920 (util.bytecount(total_bytes), elapsed,
1912 1921 util.bytecount(total_bytes / elapsed)))
1913 1922 self.reload()
1914 1923 return len(self.heads()) + 1
1915 1924
1916 1925 def clone(self, remote, heads=[], stream=False):
1917 1926 '''clone remote repository.
1918 1927
1919 1928 keyword arguments:
1920 1929 heads: list of revs to clone (forces use of pull)
1921 1930 stream: use streaming clone if possible'''
1922 1931
1923 1932 # now, all clients that can request uncompressed clones can
1924 1933 # read repo formats supported by all servers that can serve
1925 1934 # them.
1926 1935
1927 1936 # if revlog format changes, client will have to check version
1928 1937 # and format flags on "stream" capability, and use
1929 1938 # uncompressed only if compatible.
1930 1939
1931 1940 if stream and not heads and remote.capable('stream'):
1932 1941 return self.stream_in(remote)
1933 1942 return self.pull(remote, heads)
1934 1943
1935 1944 # used to avoid circular references so destructors work
1936 1945 def aftertrans(files):
1937 1946 renamefiles = [tuple(t) for t in files]
1938 1947 def a():
1939 1948 for src, dest in renamefiles:
1940 1949 util.rename(src, dest)
1941 1950 return a
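# Usage sketch (argument order per this module's callers; treat the
# exact signature as an assumption): the returned callable goes to the
# transaction as its after-close action, so journal files get renamed
# to undo files without the transaction holding references into the
# repository.
# tr = transaction.transaction(ui.warn, sopener, "journal",
#                              aftertrans([("journal", "undo")]))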
1942 1951
1943 1952 def instance(ui, path, create):
1944 1953 return localrepository(ui, util.drop_scheme('file', path), create)
1945 1954
1946 1955 def islocal(path):
1947 1956 return True
@@ -1,79 +1,86 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import *
11 11 from i18n import gettext as _
12 12 demandload(globals(), "changelog filelog httprangereader")
13 13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 raise IOError(None, inst)
21 21 except urllib2.URLError, inst:
22 22 raise IOError(None, inst.reason[1])
23 23
24 24 def opener(base):
25 25 """return a function that opens files over http"""
26 26 p = base
27 27 def o(path, mode="r"):
28 28 f = "/".join((p, urllib.quote(path)))
29 29 return rangereader(f)
30 30 return o
31 31
32 32 class statichttprepository(localrepo.localrepository):
33 33 def __init__(self, ui, path):
34 34 self._url = path
35 self.path = (path + "/.hg")
36 self.spath = self.path
37 35 self.ui = ui
38 36 self.revlogversion = 0
37
38 self.path = (path + "/.hg")
39 39 self.opener = opener(self.path)
40 40 # find requirements
41 41 try:
42 42 requirements = self.opener("requires").read().splitlines()
43 43 except IOError:
44 44 requirements = []
45 45 # check them
46 46 for r in requirements:
47 47 if r not in self.supported:
48 48 raise repo.RepoError(_("requirement '%s' not supported") % r)
49 49
50 50 # setup store
51 self.spath = self.path
52 self.sopener = opener(self.spath)
51 if "store" in requirements:
52 self.encodefn = util.encodefilename
53 self.decodefn = util.decodefilename
54 self.spath = self.path + "/store"
55 else:
56 self.encodefn = lambda x: x
57 self.decodefn = lambda x: x
58 self.spath = self.path
59 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
53 60
54 61 self.manifest = manifest.manifest(self.sopener)
55 62 self.changelog = changelog.changelog(self.sopener)
56 63 self.tagscache = None
57 64 self.nodetagscache = None
58 65 self.encodepats = None
59 66 self.decodepats = None
60 67
61 68 def url(self):
62 69 return 'static-' + self._url
63 70
64 71 def dev(self):
65 72 return -1
66 73
67 74 def local(self):
68 75 return False
69 76
70 77 def instance(ui, path, create):
71 78 if create:
72 79 raise util.Abort(_('cannot create new static-http repository'))
73 80 if path.startswith('old-http:'):
74 81 ui.warn(_("old-http:// syntax is deprecated, "
75 82 "please use static-http:// instead\n"))
76 83 path = path[4:]
77 84 else:
78 85 path = path[7:]
79 86 return statichttprepository(ui, path)
@@ -1,95 +1,97 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from i18n import gettext as _
10 10 demandload(globals(), "os stat util lock")
11 11
12 12 # if server supports streaming clone, it advertises "stream"
13 13 # capability with value that is version+flags of repo it is serving.
14 14 # client only streams if it can read that repo format.
15 15
16 16 def walkrepo(root):
17 17 '''iterate over metadata files in repository.
18 18 walk in natural (sorted) order.
19 19 yields 2-tuples: name of .d or .i file, size of file.'''
20 20
21 21 strip_count = len(root) + len(os.sep)
22 22 def walk(path, recurse):
23 23 ents = os.listdir(path)
24 24 ents.sort()
25 25 for e in ents:
26 26 pe = os.path.join(path, e)
27 27 st = os.lstat(pe)
28 28 if stat.S_ISDIR(st.st_mode):
29 29 if recurse:
30 30 for x in walk(pe, True):
31 31 yield x
32 32 else:
33 33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 34 continue
35 35 sfx = e[-2:]
36 36 if sfx in ('.d', '.i'):
37 37 yield pe[strip_count:], st.st_size
38 38 # write file data first
39 39 for x in walk(os.path.join(root, 'data'), True):
40 40 yield x
41 41 # write manifest before changelog
42 42 meta = list(walk(root, False))
43 43 meta.sort()
44 44 meta.reverse()
45 45 for x in meta:
46 46 yield x
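# ordering sketch (hypothetical store contents): walkrepo would yield
# ('data/foo.i', 1234)
# ('data/foo.d', 56789)
# ('00manifest.i', 4321)
# ('00changelog.i', 8765)
# i.e. file data first, then the manifest, then the changelog last.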
47 47
48 48 # stream file format is simple.
49 49 #
50 50 # server writes out line that says how many files, how many total
51 51 # bytes. separator is ascii space, byte counts are strings.
52 52 #
53 53 # then for each file:
54 54 #
55 55 # server writes out line that says file name, how many bytes in
56 56 # file. separator is ascii nul, byte count is string.
57 57 #
58 58 # server writes out raw file data.
59 59
60 60 def stream_out(repo, fileobj):
61 61 '''stream out all metadata files in repository.
62 62 writes to file-like object, must support write() and optional flush().'''
63 63
64 64 if not repo.ui.configbool('server', 'uncompressed'):
65 65 fileobj.write('1\n')
66 66 return
67 67
68 68 # get consistent snapshot of repo. lock during scan so lock not
69 69 # needed while we stream, and commits can happen.
70 70 try:
71 71 repolock = repo.lock()
72 72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 74 fileobj.write('2\n')
75 75 return
76 76
77 77 fileobj.write('0\n')
78 78 repo.ui.debug('scanning\n')
79 79 entries = []
80 80 total_bytes = 0
81 81 for name, size in walkrepo(repo.spath):
82 if repo.decodefn:
83 name = repo.decodefn(name)
82 84 entries.append((name, size))
83 85 total_bytes += size
84 86 repolock.release()
85 87
86 88 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 89 (len(entries), total_bytes))
88 90 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 91 for name, size in entries:
90 92 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 93 fileobj.write('%s\0%d\n' % (name, size))
92 94 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 95 fileobj.write(chunk)
94 96 flush = getattr(fileobj, 'flush', None)
95 97 if flush: flush()
@@ -1,1210 +1,1214 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import gettext as _
16 16 from demandload import *
17 17 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
18 18 demandload(globals(), "os threading time calendar ConfigParser locale")
19 19
20 20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding()
21 21 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
22 22
23 23 def tolocal(s):
24 24 """
25 25 Convert a string from internal UTF-8 to local encoding
26 26
27 27 All internal strings should be UTF-8 but some repos before the
28 28 implementation of locale support may contain latin1 or possibly
29 29 other character sets. We attempt to decode everything strictly
30 30 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
31 31 replace unknown characters.
32 32 """
33 33 for e in "utf-8 latin1".split():
34 34 try:
35 35 u = s.decode(e) # attempt strict decoding
36 36 return u.encode(_encoding, "replace")
37 37 except UnicodeDecodeError:
38 38 pass
39 39 u = s.decode("utf-8", "replace") # last ditch
40 40 return u.encode(_encoding, "replace")
41 41
42 42 def fromlocal(s):
43 43 """
44 44 Convert a string from the local character encoding to UTF-8
45 45
46 46 We attempt to decode strings using the encoding mode set by
47 47 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
48 48 characters will cause an error message. Other modes include
49 49 'replace', which replaces unknown characters with a special
50 50 Unicode character, and 'ignore', which drops the character.
51 51 """
52 52 try:
53 53 return s.decode(_encoding, _encodingmode).encode("utf-8")
54 54 except UnicodeDecodeError, inst:
55 55 sub = s[max(0, inst.start-10):inst.start+10]
56 56 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
57 57
58 58 def locallen(s):
59 59 """Find the length in characters of a local string"""
60 60 return len(s.decode(_encoding, "replace"))
61 61
62 62 def localsub(s, a, b=None):
63 63 try:
64 64 u = s.decode(_encoding, _encodingmode)
65 65 if b is not None:
66 66 u = u[a:b]
67 67 else:
68 68 u = u[:a]
69 69 return u.encode(_encoding, _encodingmode)
70 70 except UnicodeDecodeError, inst:
71 71 sub = s[max(0, inst.start-10):inst.start+10]
72 72 raise Abort("decoding near '%s': %s!\n" % (sub, inst))
73 73
74 74 # used by parsedate
75 75 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
76 76 '%a %b %d %H:%M:%S %Y')
77 77
78 78 class SignalInterrupt(Exception):
79 79 """Exception raised on SIGTERM and SIGHUP."""
80 80
81 81 # like SafeConfigParser but with case-sensitive keys
82 82 class configparser(ConfigParser.SafeConfigParser):
83 83 def optionxform(self, optionstr):
84 84 return optionstr
85 85
86 86 def cachefunc(func):
87 87 '''cache the result of function calls'''
88 88 # XXX doesn't handle keyword args
89 89 cache = {}
90 90 if func.func_code.co_argcount == 1:
91 91 # we gain a small amount of time because
92 92 # we don't need to pack/unpack the list
93 93 def f(arg):
94 94 if arg not in cache:
95 95 cache[arg] = func(arg)
96 96 return cache[arg]
97 97 else:
98 98 def f(*args):
99 99 if args not in cache:
100 100 cache[args] = func(*args)
101 101 return cache[args]
102 102
103 103 return f
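# Memoization sketch: rebinding the name makes recursive calls go
# through the cache too.
# def fib(n):
#     if n < 2:
#         return n
#     return fib(n - 1) + fib(n - 2)
# fib = cachefunc(fib)
# fib(30) # each value computed only once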
104 104
105 105 def pipefilter(s, cmd):
106 106 '''filter string S through command CMD, returning its output'''
107 107 (pout, pin) = popen2.popen2(cmd, -1, 'b')
108 108 def writer():
109 109 try:
110 110 pin.write(s)
111 111 pin.close()
112 112 except IOError, inst:
113 113 if inst.errno != errno.EPIPE:
114 114 raise
115 115
116 116 # we should use select instead on UNIX, but this will work on most
117 117 # systems, including Windows
118 118 w = threading.Thread(target=writer)
119 119 w.start()
120 120 f = pout.read()
121 121 pout.close()
122 122 w.join()
123 123 return f
124 124
125 125 def tempfilter(s, cmd):
126 126 '''filter string S through a pair of temporary files with CMD.
127 127 CMD is used as a template to create the real command to be run,
128 128 with the strings INFILE and OUTFILE replaced by the real names of
129 129 the temporary files generated.'''
130 130 inname, outname = None, None
131 131 try:
132 132 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
133 133 fp = os.fdopen(infd, 'wb')
134 134 fp.write(s)
135 135 fp.close()
136 136 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
137 137 os.close(outfd)
138 138 cmd = cmd.replace('INFILE', inname)
139 139 cmd = cmd.replace('OUTFILE', outname)
140 140 code = os.system(cmd)
141 141 if code: raise Abort(_("command '%s' failed: %s") %
142 142 (cmd, explain_exit(code)))
143 143 return open(outname, 'rb').read()
144 144 finally:
145 145 try:
146 146 if inname: os.unlink(inname)
147 147 except: pass
148 148 try:
149 149 if outname: os.unlink(outname)
150 150 except: pass
151 151
152 152 filtertable = {
153 153 'tempfile:': tempfilter,
154 154 'pipe:': pipefilter,
155 155 }
156 156
157 157 def filter(s, cmd):
158 158 "filter a string through a command that transforms its input to its output"
159 159 for name, fn in filtertable.iteritems():
160 160 if cmd.startswith(name):
161 161 return fn(s, cmd[len(name):].lstrip())
162 162 return pipefilter(s, cmd)
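# Usage sketch (assumes standard Unix tools on PATH):
# filter('hello', 'tr a-z A-Z') # piped -> 'HELLO'
# filter(data, 'tempfile:sort INFILE > OUTFILE') # via temp files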
163 163
164 164 def find_in_path(name, path, default=None):
165 165 '''find name in search path. path can be a string (will be split
166 166 with os.pathsep) or an iterable of strings. if name is
167 167 found, return path to name. else return default.'''
168 168 if isinstance(path, str):
169 169 path = path.split(os.pathsep)
170 170 for p in path:
171 171 p_name = os.path.join(p, name)
172 172 if os.path.exists(p_name):
173 173 return p_name
174 174 return default
175 175
176 176 def binary(s):
177 177 """return true if a string is binary data using diff's heuristic"""
178 178 if s and '\0' in s[:4096]:
179 179 return True
180 180 return False
181 181
182 182 def unique(g):
183 183 """return the uniq elements of iterable g"""
184 184 seen = {}
185 185 l = []
186 186 for f in g:
187 187 if f not in seen:
188 188 seen[f] = 1
189 189 l.append(f)
190 190 return l
191 191
192 192 class Abort(Exception):
193 193 """Raised if a command needs to print an error and exit."""
194 194
195 195 class UnexpectedOutput(Abort):
196 196 """Raised to print an error with part of output and exit."""
197 197
198 198 def always(fn): return True
199 199 def never(fn): return False
200 200
201 201 def patkind(name, dflt_pat='glob'):
202 202 """Split a string into an optional pattern kind prefix and the
203 203 actual pattern."""
204 204 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
205 205 if name.startswith(prefix + ':'): return name.split(':', 1)
206 206 return dflt_pat, name
207 207
208 208 def globre(pat, head='^', tail='$'):
209 209 "convert a glob pattern into a regexp"
210 210 i, n = 0, len(pat)
211 211 res = ''
212 212 group = False
213 213 def peek(): return i < n and pat[i]
214 214 while i < n:
215 215 c = pat[i]
216 216 i = i+1
217 217 if c == '*':
218 218 if peek() == '*':
219 219 i += 1
220 220 res += '.*'
221 221 else:
222 222 res += '[^/]*'
223 223 elif c == '?':
224 224 res += '.'
225 225 elif c == '[':
226 226 j = i
227 227 if j < n and pat[j] in '!]':
228 228 j += 1
229 229 while j < n and pat[j] != ']':
230 230 j += 1
231 231 if j >= n:
232 232 res += '\\['
233 233 else:
234 234 stuff = pat[i:j].replace('\\','\\\\')
235 235 i = j + 1
236 236 if stuff[0] == '!':
237 237 stuff = '^' + stuff[1:]
238 238 elif stuff[0] == '^':
239 239 stuff = '\\' + stuff
240 240 res = '%s[%s]' % (res, stuff)
241 241 elif c == '{':
242 242 group = True
243 243 res += '(?:'
244 244 elif c == '}' and group:
245 245 res += ')'
246 246 group = False
247 247 elif c == ',' and group:
248 248 res += '|'
249 249 elif c == '\\':
250 250 p = peek()
251 251 if p:
252 252 i += 1
253 253 res += re.escape(p)
254 254 else:
255 255 res += re.escape(c)
256 256 else:
257 257 res += re.escape(c)
258 258 return head + res + tail
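# Translation sketch (expected behaviour of the code above):
# globre('*.py') matches 'a.py' but not 'sub/a.py' ('*' stops at '/')
# globre('**/*.py') matches 'sub/dir/a.py' ('**' crosses '/')
# globre('f{oo,ee}') matches 'foo' and 'fee'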
259 259
260 260 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
261 261
262 262 def pathto(n1, n2):
263 263 '''return the relative path from one place to another.
264 264 n1 should use os.sep to separate directories
265 265 n2 should use "/" to separate directories
266 266 returns an os.sep-separated path.
267 267 '''
268 268 if not n1: return localpath(n2)
269 269 a, b = n1.split(os.sep), n2.split('/')
270 270 a.reverse()
271 271 b.reverse()
272 272 while a and b and a[-1] == b[-1]:
273 273 a.pop()
274 274 b.pop()
275 275 b.reverse()
276 276 return os.sep.join((['..'] * len(a)) + b)
277 277
278 278 def canonpath(root, cwd, myname):
279 279 """return the canonical path of myname, given cwd and root"""
280 280 if root == os.sep:
281 281 rootsep = os.sep
282 282 elif root.endswith(os.sep):
283 283 rootsep = root
284 284 else:
285 285 rootsep = root + os.sep
286 286 name = myname
287 287 if not os.path.isabs(name):
288 288 name = os.path.join(root, cwd, name)
289 289 name = os.path.normpath(name)
290 290 if name != rootsep and name.startswith(rootsep):
291 291 name = name[len(rootsep):]
292 292 audit_path(name)
293 293 return pconvert(name)
294 294 elif name == root:
295 295 return ''
296 296 else:
297 297 # Determine whether `name' is in the hierarchy at or beneath `root',
298 298 # by iterating name=dirname(name) until that causes no change (can't
299 299 # check name == '/', because that doesn't work on windows). For each
300 300 # `name', compare dev/inode numbers. If they match, the list `rel'
301 301 # holds the reversed list of components making up the relative file
302 302 # name we want.
303 303 root_st = os.stat(root)
304 304 rel = []
305 305 while True:
306 306 try:
307 307 name_st = os.stat(name)
308 308 except OSError:
309 309 break
310 310 if samestat(name_st, root_st):
311 311 rel.reverse()
312 312 name = os.path.join(*rel)
313 313 audit_path(name)
314 314 return pconvert(name)
315 315 dirname, basename = os.path.split(name)
316 316 rel.append(basename)
317 317 if dirname == name:
318 318 break
319 319 name = dirname
320 320
321 321 raise Abort('%s not under root' % myname)
322 322
323 323 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
324 324 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
325 325
326 326 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
327 327 if os.name == 'nt':
328 328 dflt_pat = 'glob'
329 329 else:
330 330 dflt_pat = 'relpath'
331 331 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
332 332
333 333 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
334 334 """build a function to match a set of file patterns
335 335
336 336 arguments:
337 337 canonroot - the canonical root of the tree you're matching against
338 338 cwd - the current working directory, if relevant
339 339 names - patterns to find
340 340 inc - patterns to include
341 341 exc - patterns to exclude
342 342 head - a regex to prepend to patterns to control whether a match is rooted
343 343
344 344 a pattern is one of:
345 345 'glob:<rooted glob>'
346 346 're:<rooted regexp>'
347 347 'path:<rooted path>'
348 348 'relglob:<relative glob>'
349 349 'relpath:<relative path>'
350 350 'relre:<relative regexp>'
351 351 '<rooted path or regexp>'
352 352
353 353 returns:
354 354 a 3-tuple containing
355 355 - list of explicit non-pattern names passed in
356 356 - a bool match(filename) function
357 357 - a bool indicating if any patterns were passed in
358 358
359 359 todo:
360 360 make head regex a rooted bool
361 361 """
362 362
363 363 def contains_glob(name):
364 364 for c in name:
365 365 if c in _globchars: return True
366 366 return False
367 367
368 368 def regex(kind, name, tail):
369 369 '''convert a pattern into a regular expression'''
370 370 if kind == 're':
371 371 return name
372 372 elif kind == 'path':
373 373 return '^' + re.escape(name) + '(?:/|$)'
374 374 elif kind == 'relglob':
375 375 return head + globre(name, '(?:|.*/)', tail)
376 376 elif kind == 'relpath':
377 377 return head + re.escape(name) + tail
378 378 elif kind == 'relre':
379 379 if name.startswith('^'):
380 380 return name
381 381 return '.*' + name
382 382 return head + globre(name, '', tail)
383 383
384 384 def matchfn(pats, tail):
385 385 """build a matching function from a set of patterns"""
386 386 if not pats:
387 387 return
388 388 matches = []
389 389 for k, p in pats:
390 390 try:
391 391 pat = '(?:%s)' % regex(k, p, tail)
392 392 matches.append(re.compile(pat).match)
393 393 except re.error:
394 394 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
395 395 else: raise Abort("invalid pattern (%s): %s" % (k, p))
396 396
397 397 def buildfn(text):
398 398 for m in matches:
399 399 r = m(text)
400 400 if r:
401 401 return r
402 402
403 403 return buildfn
404 404
405 405 def globprefix(pat):
406 406 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
407 407 root = []
408 408 for p in pat.split(os.sep):
409 409 if contains_glob(p): break
410 410 root.append(p)
411 411 return '/'.join(root)
412 412
413 413 pats = []
414 414 files = []
415 415 roots = []
416 416 for kind, name in [patkind(p, dflt_pat) for p in names]:
417 417 if kind in ('glob', 'relpath'):
418 418 name = canonpath(canonroot, cwd, name)
419 419 if name == '':
420 420 kind, name = 'glob', '**'
421 421 if kind in ('glob', 'path', 're'):
422 422 pats.append((kind, name))
423 423 if kind == 'glob':
424 424 root = globprefix(name)
425 425 if root: roots.append(root)
426 426 elif kind == 'relpath':
427 427 files.append((kind, name))
428 428 roots.append(name)
429 429
430 430 patmatch = matchfn(pats, '$') or always
431 431 filematch = matchfn(files, '(?:/|$)') or always
432 432 incmatch = always
433 433 if inc:
434 434 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
435 435 incmatch = matchfn(inckinds, '(?:/|$)')
436 436 excmatch = lambda fn: False
437 437 if exc:
438 438 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
439 439 excmatch = matchfn(exckinds, '(?:/|$)')
440 440
441 441 return (roots,
442 442 lambda fn: (incmatch(fn) and not excmatch(fn) and
443 443 (fn.endswith('/') or
444 444 (not pats and not files) or
445 445 (pats and patmatch(fn)) or
446 446 (files and filematch(fn)))),
447 447 (inc or exc or (pats and pats != [('glob', '**')])) and True)
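# Usage sketch (hypothetical paths):
# roots, match, anypats = matcher('/repo', '', ['glob:src/*.py'], [], [])
# match('src/a.py') # -> true
# match('src/sub/a.py') # -> false, '*' does not cross '/'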
448 448
449 449 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
450 450 '''enhanced shell command execution.
451 451 run with environment maybe modified, maybe in different dir.
452 452
453 453 if command fails and onerr is None, return status. if ui object,
454 454 print error message and return status, else raise onerr object as
455 455 exception.'''
456 456 def py2shell(val):
457 457 'convert python object into string that is useful to shell'
458 458 if val in (None, False):
459 459 return '0'
460 460 if val == True:
461 461 return '1'
462 462 return str(val)
463 463 oldenv = {}
464 464 for k in environ:
465 465 oldenv[k] = os.environ.get(k)
466 466 if cwd is not None:
467 467 oldcwd = os.getcwd()
468 468 try:
469 469 for k, v in environ.iteritems():
470 470 os.environ[k] = py2shell(v)
471 471 if cwd is not None and oldcwd != cwd:
472 472 os.chdir(cwd)
473 473 rc = os.system(cmd)
474 474 if rc and onerr:
475 475 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
476 476 explain_exit(rc)[0])
477 477 if errprefix:
478 478 errmsg = '%s: %s' % (errprefix, errmsg)
479 479 try:
480 480 onerr.warn(errmsg + '\n')
481 481 except AttributeError:
482 482 raise onerr(errmsg)
483 483 return rc
484 484 finally:
485 485 for k, v in oldenv.iteritems():
486 486 if v is None:
487 487 del os.environ[k]
488 488 else:
489 489 os.environ[k] = v
490 490 if cwd is not None and oldcwd != cwd:
491 491 os.chdir(oldcwd)
492 492
493 493 def rename(src, dst):
494 494 """forcibly rename a file"""
495 495 try:
496 496 os.rename(src, dst)
497 497 except OSError, err:
498 498 # on windows, rename to existing file is not allowed, so we
499 499 # must delete destination first. but if file is open, unlink
500 500 # schedules it for delete but does not delete it. rename
501 501 # happens immediately even for open files, so we create
502 502 # temporary file, delete it, rename destination to that name,
503 503 # then delete that. then rename is safe to do.
504 504 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
505 505 os.close(fd)
506 506 os.unlink(temp)
507 507 os.rename(dst, temp)
508 508 os.unlink(temp)
509 509 os.rename(src, dst)
510 510
511 511 def unlink(f):
512 512 """unlink and remove the directory if it is empty"""
513 513 os.unlink(f)
514 514 # try removing directories that might now be empty
515 515 try:
516 516 os.removedirs(os.path.dirname(f))
517 517 except OSError:
518 518 pass
519 519
520 520 def copyfile(src, dest):
521 521 "copy a file, preserving mode"
522 522 try:
523 523 shutil.copyfile(src, dest)
524 524 shutil.copymode(src, dest)
525 525 except shutil.Error, inst:
526 526 raise util.Abort(str(inst))
527 527
528 528 def copyfiles(src, dst, hardlink=None):
529 529 """Copy a directory tree using hardlinks if possible"""
530 530
531 531 if hardlink is None:
532 532 hardlink = (os.stat(src).st_dev ==
533 533 os.stat(os.path.dirname(dst)).st_dev)
534 534
535 535 if os.path.isdir(src):
536 536 os.mkdir(dst)
537 537 for name in os.listdir(src):
538 538 srcname = os.path.join(src, name)
539 539 dstname = os.path.join(dst, name)
540 540 copyfiles(srcname, dstname, hardlink)
541 541 else:
542 542 if hardlink:
543 543 try:
544 544 os_link(src, dst)
545 545 except (IOError, OSError):
546 546 hardlink = False
547 547 shutil.copy(src, dst)
548 548 else:
549 549 shutil.copy(src, dst)
550 550
551 551 def audit_path(path):
552 552 """Abort if path contains dangerous components"""
553 553 parts = os.path.normcase(path).split(os.sep)
554 554 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
555 555 or os.pardir in parts):
556 556 raise Abort(_("path contains illegal component: %s\n") % path)
557 557
558 558 def _makelock_file(info, pathname):
559 559 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
560 560 os.write(ld, info)
561 561 os.close(ld)
562 562
563 563 def _readlock_file(pathname):
564 564 return posixfile(pathname).read()
565 565
566 566 def nlinks(pathname):
567 567 """Return number of hardlinks for the given file."""
568 568 return os.lstat(pathname).st_nlink
569 569
570 570 if hasattr(os, 'link'):
571 571 os_link = os.link
572 572 else:
573 573 def os_link(src, dst):
574 574 raise OSError(0, _("Hardlinks not supported"))
575 575
576 576 def fstat(fp):
577 577 '''stat file object that may not have fileno method.'''
578 578 try:
579 579 return os.fstat(fp.fileno())
580 580 except AttributeError:
581 581 return os.stat(fp.name)
582 582
583 583 posixfile = file
584 584
585 585 def is_win_9x():
586 586 '''return true if run on windows 95, 98 or me.'''
587 587 try:
588 588 return sys.getwindowsversion()[3] == 1
589 589 except AttributeError:
590 590 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
591 591
592 592 getuser_fallback = None
593 593
594 594 def getuser():
595 595 '''return name of current user'''
596 596 try:
597 597 return getpass.getuser()
598 598 except ImportError:
599 599 # import of pwd will fail on windows - try fallback
600 600 if getuser_fallback:
601 601 return getuser_fallback()
602 602 # raised if win32api not available
603 603 raise Abort(_('user name not available - set USERNAME '
604 604 'environment variable'))
605 605
606 606 def username(uid=None):
607 607 """Return the name of the user with the given uid.
608 608
609 609 If uid is None, return the name of the current user."""
610 610 try:
611 611 import pwd
612 612 if uid is None:
613 613 uid = os.getuid()
614 614 try:
615 615 return pwd.getpwuid(uid)[0]
616 616 except KeyError:
617 617 return str(uid)
618 618 except ImportError:
619 619 return None
620 620
621 621 def groupname(gid=None):
622 622 """Return the name of the group with the given gid.
623 623
624 624 If gid is None, return the name of the current group."""
625 625 try:
626 626 import grp
627 627 if gid is None:
628 628 gid = os.getgid()
629 629 try:
630 630 return grp.getgrgid(gid)[0]
631 631 except KeyError:
632 632 return str(gid)
633 633 except ImportError:
634 634 return None
635 635
636 636 # File system features
637 637
638 638 def checkfolding(path):
639 639 """
640 640 Check whether the given path is on a case-sensitive filesystem
641 641
642 642 Requires a path (like /foo/.hg) ending with a foldable final
643 643 directory component.
644 644 """
645 645 s1 = os.stat(path)
646 646 d, b = os.path.split(path)
647 647 p2 = os.path.join(d, b.upper())
648 648 if path == p2:
649 649 p2 = os.path.join(d, b.lower())
650 650 try:
651 651 s2 = os.stat(p2)
652 652 if s2 == s1:
653 653 return False
654 654 return True
655 655 except:
656 656 return True
657 657
658 658 # Platform specific variants
659 659 if os.name == 'nt':
660 660 demandload(globals(), "msvcrt")
661 661 nulldev = 'NUL:'
662 662
663 663 class winstdout:
664 664 '''stdout on windows misbehaves if sent through a pipe'''
665 665
666 666 def __init__(self, fp):
667 667 self.fp = fp
668 668
669 669 def __getattr__(self, key):
670 670 return getattr(self.fp, key)
671 671
672 672 def close(self):
673 673 try:
674 674 self.fp.close()
675 675 except: pass
676 676
677 677 def write(self, s):
678 678 try:
679 679 return self.fp.write(s)
680 680 except IOError, inst:
681 681 if inst.errno != 0: raise
682 682 self.close()
683 683 raise IOError(errno.EPIPE, 'Broken pipe')
684 684
685 685 sys.stdout = winstdout(sys.stdout)
686 686
687 687 def system_rcpath():
688 688 try:
689 689 return system_rcpath_win32()
690 690 except:
691 691 return [r'c:\mercurial\mercurial.ini']
692 692
693 693 def os_rcpath():
694 694 '''return default os-specific hgrc search path'''
695 695 path = system_rcpath()
696 696 path.append(user_rcpath())
697 697 userprofile = os.environ.get('USERPROFILE')
698 698 if userprofile:
699 699 path.append(os.path.join(userprofile, 'mercurial.ini'))
700 700 return path
701 701
702 702 def user_rcpath():
703 703 '''return os-specific hgrc search path to the user dir'''
704 704 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
705 705
706 706 def parse_patch_output(output_line):
707 707 """parses the output produced by patch and returns the file name"""
708 708 pf = output_line[14:]
709 709 if pf[0] == '`':
710 710 pf = pf[1:-1] # Remove the quotes
711 711 return pf
712 712
713 713 def testpid(pid):
714 714 '''return False if pid dead, True if running or not known'''
715 715 return True
716 716
717 717 def is_exec(f, last):
718 718 return last
719 719
720 720 def set_exec(f, mode):
721 721 pass
722 722
723 723 def set_binary(fd):
724 724 msvcrt.setmode(fd.fileno(), os.O_BINARY)
725 725
726 726 def pconvert(path):
727 727 return path.replace("\\", "/")
728 728
729 729 def localpath(path):
730 730 return path.replace('/', '\\')
731 731
732 732 def normpath(path):
733 733 return pconvert(os.path.normpath(path))
734 734
735 735 makelock = _makelock_file
736 736 readlock = _readlock_file
737 737
738 738 def samestat(s1, s2):
739 739 return False
740 740
741 741 def shellquote(s):
742 742 return '"%s"' % s.replace('"', '\\"')
743 743
744 744 def explain_exit(code):
745 745 return _("exited with status %d") % code, code
746 746
747 747 # if you change this stub into a real check, please try to implement the
748 748 # username and groupname functions above, too.
749 749 def isowner(fp, st=None):
750 750 return True
751 751
752 752 try:
753 753 # override functions with win32 versions if possible
754 754 from util_win32 import *
755 755 if not is_win_9x():
756 756 posixfile = posixfile_nt
757 757 except ImportError:
758 758 pass
759 759
760 760 else:
761 761 nulldev = '/dev/null'
762 762
763 763 def rcfiles(path):
764 764 rcs = [os.path.join(path, 'hgrc')]
765 765 rcdir = os.path.join(path, 'hgrc.d')
766 766 try:
767 767 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
768 768 if f.endswith(".rc")])
769 769 except OSError:
770 770 pass
771 771 return rcs
772 772
773 773 def os_rcpath():
774 774 '''return default os-specific hgrc search path'''
775 775 path = []
776 776 # old mod_python does not set sys.argv
777 777 if len(getattr(sys, 'argv', [])) > 0:
778 778 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
779 779 '/../etc/mercurial'))
780 780 path.extend(rcfiles('/etc/mercurial'))
781 781 path.append(os.path.expanduser('~/.hgrc'))
782 782 path = [os.path.normpath(f) for f in path]
783 783 return path
784 784
785 785 def parse_patch_output(output_line):
786 786 """parses the output produced by patch and returns the file name"""
787 787 pf = output_line[14:]
788 788 if pf.startswith("'") and pf.endswith("'") and " " in pf:
789 789 pf = pf[1:-1] # Remove the quotes
790 790 return pf
791 791
792 792 def is_exec(f, last):
793 793 """check whether a file is executable"""
794 794 return (os.lstat(f).st_mode & 0100 != 0)
795 795
796 796 def set_exec(f, mode):
797 797 s = os.lstat(f).st_mode
798 798 if (s & 0100 != 0) == mode:
799 799 return
800 800 if mode:
801 801 # Turn on +x for every +r bit when making a file executable
802 802 # and obey umask.
803 803 umask = os.umask(0)
804 804 os.umask(umask)
805 805 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
806 806 else:
807 807 os.chmod(f, s & 0666)
808 808
809 809 def set_binary(fd):
810 810 pass
811 811
812 812 def pconvert(path):
813 813 return path
814 814
815 815 def localpath(path):
816 816 return path
817 817
818 818 normpath = os.path.normpath
819 819 samestat = os.path.samestat
820 820
821 821 def makelock(info, pathname):
822 822 try:
823 823 os.symlink(info, pathname)
824 824 except OSError, why:
825 825 if why.errno == errno.EEXIST:
826 826 raise
827 827 else:
828 828 _makelock_file(info, pathname)
829 829
830 830 def readlock(pathname):
831 831 try:
832 832 return os.readlink(pathname)
833 833 except OSError, why:
834 834 if why.errno == errno.EINVAL:
835 835 return _readlock_file(pathname)
836 836 else:
837 837 raise
838 838
839 839 def shellquote(s):
840 840 return "'%s'" % s.replace("'", "'\\''")
841 841
842 842 def testpid(pid):
843 843 '''return False if pid dead, True if running or not sure'''
844 844 try:
845 845 os.kill(pid, 0)
846 846 return True
847 847 except OSError, inst:
848 848 return inst.errno != errno.ESRCH
849 849
850 850 def explain_exit(code):
851 851 """return a 2-tuple (desc, code) describing a process's status"""
852 852 if os.WIFEXITED(code):
853 853 val = os.WEXITSTATUS(code)
854 854 return _("exited with status %d") % val, val
855 855 elif os.WIFSIGNALED(code):
856 856 val = os.WTERMSIG(code)
857 857 return _("killed by signal %d") % val, val
858 858 elif os.WIFSTOPPED(code):
859 859 val = os.WSTOPSIG(code)
860 860 return _("stopped by signal %d") % val, val
861 861 raise ValueError(_("invalid exit code"))
862 862
863 863 def isowner(fp, st=None):
864 864 """Return True if the file object fp belongs to the current user.
865 865
866 866 The return value of a util.fstat(fp) may be passed as the st argument.
867 867 """
868 868 if st is None:
869 869 st = fstat(fp)
870 870 return st.st_uid == os.getuid()
871 871
872 872 def _buildencodefun():
873 873 e = '_'
874 874 win_reserved = [ord(x) for x in '|\\?*<":>+[]']
875 875 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
876 876 for x in (range(32) + range(126, 256) + win_reserved):
877 877 cmap[chr(x)] = "~%02x" % x
878 878 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
879 879 cmap[chr(x)] = e + chr(x).lower()
880 880 dmap = {}
881 881 for k, v in cmap.iteritems():
882 882 dmap[v] = k
883 883 def decode(s):
884 884 i = 0
885 885 while i < len(s):
886 886 for l in xrange(1, 4):
887 887 try:
888 888 yield dmap[s[i:i+l]]
889 889 i += l
890 890 break
891 891 except KeyError:
892 892 pass
893 893 else:
894 894 raise KeyError
895 895 return (lambda s: "".join([cmap[c] for c in s]),
896 896 lambda s: "".join(list(decode(s))))
897 897
898 898 encodefilename, decodefilename = _buildencodefun()
899 899
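The net effect of the encoder is easiest to see on a concrete path. A doctest-style sketch (the filename is illustrative): uppercase letters become '_' plus the lowercase letter, '_' itself doubles, and Windows-reserved or non-printable bytes become '~%02x'; decode retries one- to three-byte tokens, so the mapping round-trips:

>>> encodefilename('data/FOO:BAR_.i')
'data/_f_o_o~3a_b_a_r__.i'
>>> decodefilename('data/_f_o_o~3a_b_a_r__.i')
'data/FOO:BAR_.i'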
900 def encodedopener(openerfn, fn):
901 def o(path, *args, **kw):
902 return openerfn(fn(path), *args, **kw)
903 return o
900 904
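encodedopener just rewrites the path before delegating, which is presumably how the store opener is wired up: combine the plain opener below with encodefilename so callers keep using literal names. A hedged sketch:

>>> sopener = encodedopener(opener('.hg/store'), encodefilename)
>>> # sopener('data/FOO.i') actually opens .hg/store/data/_f_o_o.i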
901 905 def opener(base, audit=True):
902 906 """
903 907 return a function that opens files relative to base
904 908
905 909 this function is used to hide the details of COW semantics and
906 910 remote file access from higher level code.
907 911 """
908 912 p = base
909 913 audit_p = audit
910 914
911 915 def mktempcopy(name):
912 916 d, fn = os.path.split(name)
913 917 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
914 918 os.close(fd)
915 919 ofp = posixfile(temp, "wb")
916 920 try:
917 921 try:
918 922 ifp = posixfile(name, "rb")
919 923 except IOError, inst:
920 924 if not getattr(inst, 'filename', None):
921 925 inst.filename = name
922 926 raise
923 927 for chunk in filechunkiter(ifp):
924 928 ofp.write(chunk)
925 929 ifp.close()
926 930 ofp.close()
927 931 except:
928 932 try: os.unlink(temp)
929 933 except: pass
930 934 raise
931 935 st = os.lstat(name)
932 936 os.chmod(temp, st.st_mode)
933 937 return temp
934 938
935 939 class atomictempfile(posixfile):
936 940 """writes go to a temporary copy; rename() moves it over the original"""
937 941 def __init__(self, name, mode):
938 942 self.__name = name
939 943 self.temp = mktempcopy(name)
940 944 posixfile.__init__(self, self.temp, mode)
941 945 def rename(self):
942 946 if not self.closed:
943 947 posixfile.close(self)
944 948 rename(self.temp, localpath(self.__name))
945 949 def __del__(self):
946 950 if not self.closed:
947 951 try:
948 952 os.unlink(self.temp)
949 953 except: pass
950 954 posixfile.close(self)
951 955
952 956 class atomicfile(atomictempfile):
953 957 """like atomictempfile, but the copy replaces the original on close"""
954 958 def __init__(self, name, mode):
955 959 atomictempfile.__init__(self, name, mode)
956 960 def close(self):
957 961 self.rename()
958 962 def __del__(self):
959 963 self.rename()
960 964
961 965 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
962 966 if audit_p:
963 967 audit_path(path)
964 968 f = os.path.join(p, path)
965 969
966 970 if not text:
967 971 mode += "b" # for that other OS
968 972
969 973 if mode[0] != "r":
970 974 try:
971 975 nlink = nlinks(f)
972 976 except OSError:
973 977 d = os.path.dirname(f)
974 978 if not os.path.isdir(d):
975 979 os.makedirs(d)
976 980 else:
977 981 if atomic:
978 982 return atomicfile(f, mode)
979 983 elif atomictemp:
980 984 return atomictempfile(f, mode)
981 985 if nlink > 1:
982 986 rename(mktempcopy(f), f)
983 987 return posixfile(f, mode)
984 988
985 989 return o
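A short usage sketch of the returned function (paths illustrative). Reads force binary mode unless text=True; writes first break hard links via mktempcopy, and atomictemp defers installation until rename():

>>> o = opener('.hg/store')
>>> f = o('data/foo.i')                        # opened "rb"
>>> w = o('data/foo.i', 'a', atomictemp=True)  # writes go to a temp copy
>>> w.write('...'); w.rename()                 # rename() puts it in place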
986 990
987 991 class chunkbuffer(object):
988 992 """Allow arbitrary sized chunks of data to be efficiently read from an
989 993 iterator over chunks of arbitrary size."""
990 994
991 995 def __init__(self, in_iter, targetsize = 2**16):
992 996 """in_iter is the iterator that's iterating over the input chunks.
993 997 targetsize is how big a buffer to try to maintain."""
994 998 self.in_iter = iter(in_iter)
995 999 self.buf = ''
996 1000 self.targetsize = int(targetsize)
997 1001 if self.targetsize <= 0:
998 1002 raise ValueError(_("targetsize must be greater than 0, was %d") %
999 1003 targetsize)
1000 1004 self.iterempty = False
1001 1005
1002 1006 def fillbuf(self):
1003 1007 """Ignore target size; read every chunk from iterator until empty."""
1004 1008 if not self.iterempty:
1005 1009 collector = cStringIO.StringIO()
1006 1010 collector.write(self.buf)
1007 1011 for ch in self.in_iter:
1008 1012 collector.write(ch)
1009 1013 self.buf = collector.getvalue()
1010 1014 self.iterempty = True
1011 1015
1012 1016 def read(self, l):
1013 1017 """Read L bytes of data from the iterator of chunks of data.
1014 1018 Returns less than L bytes if the iterator runs dry."""
1015 1019 if l > len(self.buf) and not self.iterempty:
1016 1020 # Clamp to a multiple of self.targetsize
1017 1021 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1018 1022 collector = cStringIO.StringIO()
1019 1023 collector.write(self.buf)
1020 1024 collected = len(self.buf)
1021 1025 for chunk in self.in_iter:
1022 1026 collector.write(chunk)
1023 1027 collected += len(chunk)
1024 1028 if collected >= targetsize:
1025 1029 break
1026 1030 if collected < targetsize:
1027 1031 self.iterempty = True
1028 1032 self.buf = collector.getvalue()
1029 1033 s, self.buf = self.buf[:l], buffer(self.buf, l)
1030 1034 return s
1031 1035
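chunkbuffer in action, with tiny chunks for illustration; the second read comes up short because the iterator ran dry:

>>> cb = chunkbuffer(iter(['abc', 'defg', 'hi']))
>>> cb.read(4)
'abcd'
>>> cb.read(100)
'efghi'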
1032 1036 def filechunkiter(f, size=65536, limit=None):
1033 1037 """Create a generator that yields the data in file f in chunks
1034 1038 of the given size (default 65536 bytes), up to an optional
1035 1039 limit (default: read all data). A chunk may be smaller than
1036 1040 size if it is the last chunk in the file, or if f is a socket
1037 1041 or some other type of file that sometimes reads less data
1038 1042 than requested."""
1039 1043 assert size >= 0
1040 1044 assert limit is None or limit >= 0
1041 1045 while True:
1042 1046 if limit is None: nbytes = size
1043 1047 else: nbytes = min(limit, size)
1044 1048 s = nbytes and f.read(nbytes)
1045 1049 if not s: break
1046 1050 if limit: limit -= len(s)
1047 1051 yield s
1048 1052
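Typical use is a bounded copy loop, as in mktempcopy above (the path here is illustrative):

>>> total = 0
>>> for chunk in filechunkiter(open('.hg/store/00manifest.i', 'rb'), size=8192):
...     total += len(chunk)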
1049 1053 def makedate():
1050 1054 lt = time.localtime()
1051 1055 if lt[8] == 1 and time.daylight:
1052 1056 tz = time.altzone
1053 1057 else:
1054 1058 tz = time.timezone
1055 1059 return time.mktime(lt), tz
1056 1060
1057 1061 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1058 1062 """represent a (unixtime, offset) tuple as a localized time.
1059 1063 unixtime is seconds since the epoch, and offset is the time zone's
1060 1064 number of seconds away from UTC. if timezone is false, do not
1061 1065 append time zone to string."""
1062 1066 t, tz = date or makedate()
1063 1067 s = time.strftime(format, time.gmtime(float(t) - tz))
1064 1068 if timezone:
1065 1069 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1066 1070 return s
1067 1071
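With the default format, the epoch in UTC and the same instant one hour east of UTC render as:

>>> datestr((0, 0))
'Thu Jan 01 00:00:00 1970 +0000'
>>> datestr((0, -3600))
'Thu Jan 01 01:00:00 1970 +0100'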
1068 1072 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
1069 1073 """parse a localized time string and return a (unixtime, offset) tuple.
1070 1074 if the string cannot be parsed, ValueError is raised."""
1071 1075 def hastimezone(string):
1072 1076 return (string[-4:].isdigit() and
1073 1077 (string[-5] == '+' or string[-5] == '-') and
1074 1078 string[-6].isspace())
1075 1079
1076 1080 # NOTE: unixtime = localunixtime + offset
1077 1081 if hastimezone(string):
1078 1082 date, tz = string[:-6], string[-5:]
1079 1083 tz = int(tz)
1080 1084 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1081 1085 else:
1082 1086 date, offset = string, None
1083 1087 timetuple = time.strptime(date, format)
1084 1088 localunixtime = int(calendar.timegm(timetuple))
1085 1089 if offset is None:
1086 1090 # local timezone
1087 1091 unixtime = int(time.mktime(timetuple))
1088 1092 offset = unixtime - localunixtime
1089 1093 else:
1090 1094 unixtime = localunixtime + offset
1091 1095 return unixtime, offset
1092 1096
1093 1097 def parsedate(string, formats=None):
1094 1098 """parse a localized time string and return a (unixtime, offset) tuple.
1095 1099 The date may be a "unixtime offset" string or in one of the specified
1096 1100 formats."""
1097 1101 if not formats:
1098 1102 formats = defaultdateformats
1099 1103 try:
1100 1104 when, offset = map(int, string.split(' '))
1101 1105 except ValueError:
1102 1106 for format in formats:
1103 1107 try:
1104 1108 when, offset = strdate(string, format)
1105 1109 except ValueError:
1106 1110 pass
1107 1111 else:
1108 1112 break
1109 1113 else:
1110 1114 raise ValueError(_('invalid date: %r '
1111 1115 'see hg(1) manual page for details')
1112 1116 % string)
1113 1117 # validate explicit (probably user-specified) date and
1114 1118 # time zone offset. values must fit in signed 32 bits for
1115 1119 # current 32-bit linux runtimes. timezones go from UTC-12
1116 1120 # to UTC+14
1117 1121 if abs(when) > 0x7fffffff:
1118 1122 raise ValueError(_('date exceeds 32 bits: %d') % when)
1119 1123 if offset < -50400 or offset > 43200:
1120 1124 raise ValueError(_('impossible time zone offset: %d') % offset)
1121 1125 return when, offset
1122 1126
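Both accepted forms agree on the test dates used throughout this change (the second line assumes '%a %b %d %H:%M:%S %Y' is among the default formats):

>>> parsedate('1000000 0')
(1000000, 0)
>>> parsedate('Mon Jan 12 13:46:40 1970 +0000')
(1000000, 0)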
1123 1127 def shortuser(user):
1124 1128 """Return a short representation of a user name or email address."""
1125 1129 f = user.find('@')
1126 1130 if f >= 0:
1127 1131 user = user[:f]
1128 1132 f = user.find('<')
1129 1133 if f >= 0:
1130 1134 user = user[f+1:]
1131 1135 f = user.find(' ')
1132 1136 if f >= 0:
1133 1137 user = user[:f]
1134 1138 f = user.find('.')
1135 1139 if f >= 0:
1136 1140 user = user[:f]
1137 1141 return user
1138 1142
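For example, the address part, the bracketed remainder, and anything after a space or dot are stripped in turn:

>>> shortuser('Foo Bar <foo.bar@example.com>')
'foo'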
1139 1143 def ellipsis(text, maxlength=400):
1140 1144 """Trim string to at most maxlength (default: 400) characters."""
1141 1145 if len(text) <= maxlength:
1142 1146 return text
1143 1147 else:
1144 1148 return "%s..." % (text[:maxlength-3])
1145 1149
1146 1150 def walkrepos(path):
1147 1151 '''yield every hg repository under path, recursively.'''
1148 1152 def errhandler(err):
1149 1153 if err.filename == path:
1150 1154 raise err
1151 1155
1152 1156 for root, dirs, files in os.walk(path, onerror=errhandler):
1153 1157 for d in dirs:
1154 1158 if d == '.hg':
1155 1159 yield root
1156 1160 dirs[:] = []
1157 1161 break
1158 1162
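For a hypothetical tree /srv/repos containing repositories a and b/c, the pruning of dirs means nothing below a repository is scanned:

>>> list(walkrepos('/srv/repos'))
['/srv/repos/a', '/srv/repos/b/c']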
1159 1163 _rcpath = None
1160 1164
1161 1165 def rcpath():
1162 1166 '''return hgrc search path. if env var HGRCPATH is set, use it.
1163 1167 for each item in path, if directory, use files ending in .rc,
1164 1168 else use item.
1165 1169 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1166 1170 if no HGRCPATH, use default os-specific path.'''
1167 1171 global _rcpath
1168 1172 if _rcpath is None:
1169 1173 if 'HGRCPATH' in os.environ:
1170 1174 _rcpath = []
1171 1175 for p in os.environ['HGRCPATH'].split(os.pathsep):
1172 1176 if not p: continue
1173 1177 if os.path.isdir(p):
1174 1178 for f in os.listdir(p):
1175 1179 if f.endswith('.rc'):
1176 1180 _rcpath.append(os.path.join(p, f))
1177 1181 else:
1178 1182 _rcpath.append(p)
1179 1183 else:
1180 1184 _rcpath = os_rcpath()
1181 1185 return _rcpath
1182 1186
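For example, an empty HGRCPATH suppresses every global file; since the result is cached in _rcpath, the variable must be set before the first call:

>>> import os
>>> os.environ['HGRCPATH'] = ''
>>> rcpath()
[]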
1183 1187 def bytecount(nbytes):
1184 1188 '''return byte count formatted as readable string, with units'''
1185 1189
1186 1190 units = (
1187 1191 (100, 1<<30, _('%.0f GB')),
1188 1192 (10, 1<<30, _('%.1f GB')),
1189 1193 (1, 1<<30, _('%.2f GB')),
1190 1194 (100, 1<<20, _('%.0f MB')),
1191 1195 (10, 1<<20, _('%.1f MB')),
1192 1196 (1, 1<<20, _('%.2f MB')),
1193 1197 (100, 1<<10, _('%.0f KB')),
1194 1198 (10, 1<<10, _('%.1f KB')),
1195 1199 (1, 1<<10, _('%.2f KB')),
1196 1200 (1, 1, _('%.0f bytes')),
1197 1201 )
1198 1202
1199 1203 for multiplier, divisor, format in units:
1200 1204 if nbytes >= divisor * multiplier:
1201 1205 return format % (nbytes / float(divisor))
1202 1206 return units[-1][2] % nbytes
1203 1207
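For example:

>>> bytecount(1048576)
'1.00 MB'
>>> bytecount(512)
'512 bytes'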
1204 1208 def drop_scheme(scheme, path):
1205 1209 sc = scheme + ':'
1206 1210 if path.startswith(sc):
1207 1211 path = path[len(sc):]
1208 1212 if path.startswith('//'):
1209 1213 path = path[2:]
1210 1214 return path
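For example:

>>> drop_scheme('file', 'file:///tmp/repo')
'/tmp/repo'
>>> drop_scheme('file', '/tmp/repo')
'/tmp/repo'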
@@ -1,113 +1,113 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0" -d "1000000 0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1" -d "1000000 0"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2" -d "1000000 0"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3" -d "1000000 0"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1" -d "1000000 0"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2" -d "1000000 0"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3" -d "1000000 0"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m" -d "1000000 0"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m" -d "1000000 0"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 mkdir test-"$i"
53 53 hg --cwd test-"$i" init
54 54 hg -R test bundle -r "$i" test-"$i".hg test-"$i"
55 55 cd test-"$i"
56 56 hg unbundle ../test-"$i".hg
57 57 hg verify
58 58 hg tip -q
59 59 cd ..
60 60 done
61 61 cd test-8
62 62 hg pull ../test-7
63 63 hg verify
64 64 hg rollback
65 65 cd ..
66 66
67 67 echo % should fail
68 68 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg test-3
69 69 hg -R test bundle -r tip test-bundle-branch1.hg
70 70
71 71 hg -R test bundle --base 2 -r tip test-bundle-branch1.hg
72 72 hg -R test bundle --base 2 -r 7 test-bundle-branch2.hg
73 73 hg -R test bundle --base 2 test-bundle-all.hg
74 74 hg -R test bundle --base 3 -r tip test-bundle-should-fail.hg
75 75
76 76 # issue76 msg2163
77 77 hg -R test bundle --base 3 -r 3 -r 3 test-bundle-cset-3.hg
78 78
79 79 hg clone test-2 test-9
80 80 cd test-9
81 81 echo % 2
82 82 hg tip -q
83 83 hg unbundle ../test-bundle-should-fail.hg
84 84 echo % 2
85 85 hg tip -q
86 86 hg unbundle ../test-bundle-all.hg
87 87 echo % 8
88 88 hg tip -q
89 89 hg verify
90 90 hg rollback
91 91 echo % 2
92 92 hg tip -q
93 93 hg unbundle ../test-bundle-branch1.hg
94 94 echo % 4
95 95 hg tip -q
96 96 hg verify
97 97 hg rollback
98 98 hg unbundle ../test-bundle-branch2.hg
99 99 echo % 6
100 100 hg tip -q
101 101 hg verify
102 102
103 103 cd ../test
104 104 hg merge 7
105 105 hg ci -m merge -d "1000000 0"
106 106 cd ..
107 107 hg -R test bundle --base 2 test-bundle-head.hg
108 108 hg clone test-2 test-10
109 109 cd test-10
110 110 hg unbundle ../test-bundle-head.hg
111 111 echo % 9
112 112 hg tip -q
113 113 hg verify
@@ -1,59 +1,59 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 hg clone -r "$i" test test-"$i"
53 53 cd test-"$i"
54 54 hg verify
55 55 cd ..
56 56 done
57 57 cd test-8
58 58 hg pull ../test-7
59 59 hg verify
@@ -1,14 +1,14 b''
1 1 #!/bin/sh
2 2
3 3 hg init dir
4 4 cd dir
5 5 echo bleh > bar
6 6 hg add bar
7 7 hg ci -m 'add bar'
8 8
9 9 hg cp bar foo
10 10 echo >> bar
11 11 hg ci -m 'cp bar foo; change bar'
12 12
13 13 hg debugrename foo
14 hg debugindex .hg/data/bar.i
14 hg debugindex .hg/store/data/bar.i
@@ -1,30 +1,30 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo a > a
5 5 hg add a
6 6 hg commit -m "1" -d "1000000 0"
7 7 hg status
8 8 hg copy a b
9 9 hg status
10 10 hg --debug commit -m "2" -d "1000000 0"
11 11 echo "we should see two history entries"
12 12 hg history -v
13 13 echo "we should see one log entry for a"
14 14 hg log a
15 15 echo "this should show a revision linked to changeset 0"
16 hg debugindex .hg/data/a.i
16 hg debugindex .hg/store/data/a.i
17 17 echo "we should see one log entry for b"
18 18 hg log b
19 19 echo "this should show a revision linked to changeset 1"
20 hg debugindex .hg/data/b.i
20 hg debugindex .hg/store/data/b.i
21 21
22 22 echo "this should show the rename information in the metadata"
23 hg debugdata .hg/data/b.d 0 | head -3 | tail -2
23 hg debugdata .hg/store/data/b.d 0 | head -3 | tail -2
24 24
25 $TESTDIR/md5sum.py .hg/data/b.i
25 $TESTDIR/md5sum.py .hg/store/data/b.i
26 26 hg cat b > bsum
27 27 $TESTDIR/md5sum.py bsum
28 28 hg cat a > asum
29 29 $TESTDIR/md5sum.py asum
30 30 hg verify
@@ -1,51 +1,51 b''
1 1 A b
2 2 b
3 3 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
4 4 we should see two history entries
5 5 changeset: 1:386a3cc01532
6 6 tag: tip
7 7 user: test
8 8 date: Mon Jan 12 13:46:40 1970 +0000
9 9 files: b
10 10 description:
11 11 2
12 12
13 13
14 14 changeset: 0:33aaa84a386b
15 15 user: test
16 16 date: Mon Jan 12 13:46:40 1970 +0000
17 17 files: a
18 18 description:
19 19 1
20 20
21 21
22 22 we should see one log entry for a
23 23 changeset: 0:33aaa84a386b
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: 1
27 27
28 28 this should show a revision linked to changeset 0
29 29 rev offset length base linkrev nodeid p1 p2
30 30 0 0 3 0 0 b789fdd96dc2 000000000000 000000000000
31 31 we should see one log entry for b
32 32 changeset: 1:386a3cc01532
33 33 tag: tip
34 34 user: test
35 35 date: Mon Jan 12 13:46:40 1970 +0000
36 36 summary: 2
37 37
38 38 this should show a revision linked to changeset 1
39 39 rev offset length base linkrev nodeid p1 p2
40 40 0 0 65 0 1 9a263dd772e0 000000000000 000000000000
41 41 this should show the rename information in the metadata
42 42 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
43 43 copy: a
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/data/b.i
44 ed156f22f0a6fde642de0b5eba0cbbb2 .hg/store/data/b.i
45 45 60b725f10c9c85c70d97880dfe8191b3 bsum
46 46 60b725f10c9c85c70d97880dfe8191b3 asum
47 47 checking changesets
48 48 checking manifests
49 49 crosschecking files in changesets and manifests
50 50 checking files
51 51 2 files, 2 changesets, 2 total revisions
@@ -1,41 +1,41 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo foo > foo
5 5 hg add foo
6 6 hg commit -m1 -d"0 0"
7 7
8 8 echo "# should show copy"
9 9 hg copy foo bar
10 10 hg debugstate|grep '^copy'
11 11
12 12 echo "# shouldn't show copy"
13 13 hg commit -m2 -d"0 0"
14 14 hg debugstate|grep '^copy'
15 15
16 16 echo "# should match"
17 hg debugindex .hg/data/foo.i
17 hg debugindex .hg/store/data/foo.i
18 18 hg debugrename bar
19 19
20 20 echo bleah > foo
21 21 echo quux > bar
22 22 hg commit -m3 -d"0 0"
23 23
24 24 echo "# should not be renamed"
25 25 hg debugrename bar
26 26
27 27 hg copy -f foo bar
28 28 echo "# should show copy"
29 29 hg debugstate|grep '^copy'
30 30 hg commit -m3 -d"0 0"
31 31
32 32 echo "# should show no parents for tip"
33 hg debugindex .hg/data/bar.i
33 hg debugindex .hg/store/data/bar.i
34 34 echo "# should match"
35 hg debugindex .hg/data/foo.i
35 hg debugindex .hg/store/data/foo.i
36 36 hg debugrename bar
37 37
38 38 echo "# should show no copies"
39 39 hg debugstate|grep '^copy'
40 40
41 41 exit 0
@@ -1,49 +1,49 b''
1 1 #!/bin/sh
2 2 #
3 3 # A B
4 4 #
5 5 # 3 4 3
6 6 # |\/| |\
7 7 # |/\| | \
8 8 # 1 2 1 2
9 9 # \ / \ /
10 10 # 0 0
11 11 #
12 12 # if the result of the merge of 1 and 2
13 13 # is the same in 3 and 4, no new manifest
14 14 # will be created and the manifest group
15 15 # will be empty during the pull
16 16 #
17 17 # (plus we test a failure where outgoing
18 18 # wrongly reported the number of csets)
19 19 #
20 20
21 21 hg init a
22 22 cd a
23 23 touch init
24 24 hg ci -A -m 0 -d "1000000 0"
25 25 touch x y
26 26 hg ci -A -m 1 -d "1000000 0"
27 27 hg update 0
28 28 touch x y
29 29 hg ci -A -m 2 -d "1000000 0"
30 30 hg merge 1
31 31 hg ci -A -m m1 -d "1000000 0"
32 32 #hg log
33 #hg debugindex .hg/00manifest.i
33 #hg debugindex .hg/store/00manifest.i
34 34 hg update -C 1
35 35 hg merge 2
36 36 hg ci -A -m m2 -d "1000000 0"
37 37 #hg log
38 #hg debugindex .hg/00manifest.i
38 #hg debugindex .hg/store/00manifest.i
39 39
40 40 cd ..
41 41 hg clone -r 3 a b
42 42 hg clone -r 4 a c
43 43 hg -R a outgoing b
44 44 hg -R a outgoing c
45 45 hg -R b outgoing c
46 46 hg -R c outgoing b
47 47
48 48 hg -R b pull a
49 49 hg -R c pull a
@@ -1,34 +1,34 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4
5 5 cat > .hg/hgrc <<EOF
6 6 [encode]
7 7 *.gz = gunzip
8 8
9 9 [decode]
10 10 *.gz = gzip
11 11
12 12 EOF
13 13
14 14 echo "this is a test" | gzip > a.gz
15 15 hg add a.gz
16 16 hg ci -m "test" -d "1000000 0"
17 17 echo %% no changes
18 18 hg status
19 19 touch a.gz
20 20
21 21 echo %% no changes
22 22 hg status
23 23
24 24 echo %% uncompressed contents in repo
25 hg debugdata .hg/data/a.gz.d 0
25 hg debugdata .hg/store/data/a.gz.d 0
26 26
27 27 echo %% uncompress our working dir copy
28 28 gunzip < a.gz
29 29
30 30 rm a.gz
31 31 hg co
32 32
33 33 echo %% uncompress our new working dir copy
34 34 gunzip < a.gz
@@ -1,46 +1,46 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4
5 5 echo foo > a
6 6 echo foo > b
7 7 hg add a b
8 8
9 9 hg ci -m "test" -d "1000000 0"
10 10
11 11 echo blah > a
12 12
13 13 hg ci -m "branch a" -d "1000000 0"
14 14
15 15 hg co 0
16 16
17 17 echo blah > b
18 18
19 19 hg ci -m "branch b" -d "1000000 0"
20 20 HGMERGE=true hg merge 1
21 21
22 22 hg ci -m "merge b/a -> blah" -d "1000000 0"
23 23
24 24 hg co 1
25 25 HGMERGE=true hg merge 2
26 26 hg ci -m "merge a/b -> blah" -d "1000000 0"
27 27
28 28 hg log
29 hg debugindex .hg/00changelog.i
29 hg debugindex .hg/store/00changelog.i
30 30
31 31 echo
32 32
33 33 echo 1
34 34 hg manifest --debug 1
35 35 echo 2
36 36 hg manifest --debug 2
37 37 echo 3
38 38 hg manifest --debug 3
39 39 echo 4
40 40 hg manifest --debug 4
41 41
42 42 echo
43 43
44 hg debugindex .hg/data/a.i
44 hg debugindex .hg/store/data/a.i
45 45
46 46 hg verify
@@ -1,79 +1,79 b''
1 1 #!/bin/sh
2 2
3 3 # This test makes sure that we don't mark a file as merged with its ancestor
4 4 # when we do a merge.
5 5
6 6 cat <<'EOF' > merge
7 7 #!/bin/sh
8 8 echo merging for `basename $1`
9 9 EOF
10 10 chmod +x merge
11 11
12 12 echo creating base
13 13 hg init a
14 14 cd a
15 15 echo 1 > foo
16 16 echo 1 > bar
17 17 echo 1 > baz
18 18 echo 1 > quux
19 19 hg add foo bar baz quux
20 20 hg commit -m "base" -d "1000000 0"
21 21
22 22 cd ..
23 23 hg clone a b
24 24
25 25 echo creating branch a
26 26 cd a
27 27 echo 2a > foo
28 28 echo 2a > bar
29 29 hg commit -m "branch a" -d "1000000 0"
30 30
31 31 echo creating branch b
32 32
33 33 cd ..
34 34 cd b
35 35 echo 2b > foo
36 36 echo 2b > baz
37 37 hg commit -m "branch b" -d "1000000 0"
38 38
39 39 echo "we shouldn't have anything but n state here"
40 40 hg debugstate | cut -b 1-16,35-
41 41
42 42 echo merging
43 43 hg pull ../a
44 44 env HGMERGE=../merge hg merge -v
45 45
46 46 echo 2m > foo
47 47 echo 2b > baz
48 48 echo new > quux
49 49
50 50 echo "we shouldn't have anything but foo in merge state here"
51 51 hg debugstate | cut -b 1-16,35- | grep "^m"
52 52
53 53 hg ci -m "merge" -d "1000000 0"
54 54
55 55 echo "main: we should have a merge here"
56 hg debugindex .hg/00changelog.i
56 hg debugindex .hg/store/00changelog.i
57 57
58 58 echo "log should show foo and quux changed"
59 59 hg log -v -r tip
60 60
61 61 echo "foo: we should have a merge here"
62 hg debugindex .hg/data/foo.i
62 hg debugindex .hg/store/data/foo.i
63 63
64 64 echo "bar: we shouldn't have a merge here"
65 hg debugindex .hg/data/bar.i
65 hg debugindex .hg/store/data/bar.i
66 66
67 67 echo "baz: we shouldn't have a merge here"
68 hg debugindex .hg/data/baz.i
68 hg debugindex .hg/store/data/baz.i
69 69
70 70 echo "quux: we shouldn't have a merge here"
71 hg debugindex .hg/data/quux.i
71 hg debugindex .hg/store/data/quux.i
72 72
73 73 echo "manifest entries should match tips of all files"
74 74 hg manifest --debug
75 75
76 76 echo "everything should be clean now"
77 77 hg status
78 78
79 79 hg verify
@@ -1,48 +1,48 b''
1 1 #!/bin/sh -e
2 2
3 3 umask 027
4 4 mkdir test1
5 5 cd test1
6 6
7 7 hg init
8 8 touch a b
9 9 hg add a b
10 10 hg ci -m "added a b" -d "1000000 0"
11 11
12 12 cd ..
13 13 hg clone test1 test3
14 14 mkdir test2
15 15 cd test2
16 16
17 17 hg init
18 18 hg pull ../test1
19 19 hg co
20 20 chmod +x a
21 21 hg ci -m "chmod +x a" -d "1000000 0"
22 22
23 23 cd ../test1
24 24 echo 123 >>a
25 25 hg ci -m "a updated" -d "1000000 0"
26 26
27 27 hg pull ../test2
28 28 hg heads
29 29 hg history
30 30
31 31 hg -v merge
32 32
33 33 cd ../test3
34 34 echo 123 >>b
35 35 hg ci -m "b updated" -d "1000000 0"
36 36
37 37 hg pull ../test2
38 38 hg heads
39 39 hg history
40 40
41 41 hg -v merge
42 42
43 43 ls -l ../test[123]/a > foo
44 44 cut -b 1-10 < foo
45 45
46 hg debugindex .hg/data/a.i
47 hg debugindex ../test2/.hg/data/a.i
48 hg debugindex ../test1/.hg/data/a.i
46 hg debugindex .hg/store/data/a.i
47 hg debugindex ../test2/.hg/store/data/a.i
48 hg debugindex ../test1/.hg/store/data/a.i
@@ -1,78 +1,78 b''
1 1 #!/bin/sh
2 2
3 3 hg init remote
4 4 cd remote
5 5 echo "# creating 'remote'"
6 6 cat >>afile <<EOF
7 7 0
8 8 EOF
9 9 hg add afile
10 10 hg commit -m "0.0"
11 11 cat >>afile <<EOF
12 12 1
13 13 EOF
14 14 hg commit -m "0.1"
15 15 cat >>afile <<EOF
16 16 2
17 17 EOF
18 18 hg commit -m "0.2"
19 19 cat >>afile <<EOF
20 20 3
21 21 EOF
22 22 hg commit -m "0.3"
23 23 hg update -C 0
24 24 cat >>afile <<EOF
25 25 1
26 26 EOF
27 27 hg commit -m "1.1"
28 28 cat >>afile <<EOF
29 29 2
30 30 EOF
31 31 hg commit -m "1.2"
32 32 cat >fred <<EOF
33 33 a line
34 34 EOF
35 35 cat >>afile <<EOF
36 36 3
37 37 EOF
38 38 hg add fred
39 39 hg commit -m "1.3"
40 40 hg mv afile adifferentfile
41 41 hg commit -m "1.3m"
42 42 hg update -C 3
43 43 hg mv afile anotherfile
44 44 hg commit -m "0.3m"
45 hg debugindex .hg/data/afile.i
46 hg debugindex .hg/data/adifferentfile.i
47 hg debugindex .hg/data/anotherfile.i
48 hg debugindex .hg/data/fred.i
49 hg debugindex .hg/00manifest.i
45 hg debugindex .hg/store/data/afile.i
46 hg debugindex .hg/store/data/adifferentfile.i
47 hg debugindex .hg/store/data/anotherfile.i
48 hg debugindex .hg/store/data/fred.i
49 hg debugindex .hg/store/00manifest.i
50 50 hg verify
51 51 echo "# Starting server"
52 52 hg serve -p 20061 -d --pid-file=../hg1.pid
53 53 cd ..
54 54 cat hg1.pid >> $DAEMON_PIDS
55 55
56 56 echo "# clone remote via stream"
57 57 for i in 0 1 2 3 4 5 6 7 8; do
58 58 hg clone -r "$i" http://localhost:20061/ test-"$i" 2>&1
59 59 if cd test-"$i"; then
60 60 hg verify
61 61 cd ..
62 62 fi
63 63 done
64 64 cd test-8
65 65 hg pull ../test-7
66 66 hg verify
67 67 cd ..
68 68 cd test-1
69 69 hg pull -r 4 http://localhost:20061/ 2>&1
70 70 hg verify
71 71 hg pull http://localhost:20061/ 2>&1
72 72 cd ..
73 73 cd test-2
74 74 hg pull -r 5 http://localhost:20061/ 2>&1
75 75 hg verify
76 76 hg pull http://localhost:20061/ 2>&1
77 77 hg verify
78 78 cd ..
@@ -1,16 +1,16 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 mkfifo p
5 5
6 6 hg serve --stdio < p &
7 7 P=$!
8 8 (echo lock; echo addchangegroup; sleep 5) > p &
9 9 Q=$!
10 10 sleep 3
11 11 kill -HUP $P
12 12 wait
13 ls .hg
13 ls -R .hg
14 14
15 15
16 16
@@ -1,9 +1,14 b''
1 1 0
2 2 0
3 3 adding changesets
4 4 killed!
5 5 transaction abort!
6 6 rollback completed
7 .hg:
7 8 00changelog.i
8 9 journal.dirstate
9 10 requires
11 store
12
13 .hg/store:
14 00changelog.i
@@ -1,11 +1,11 b''
1 1 #!/bin/sh
2 2
3 3 hg init a
4 4 echo a > a/a
5 5 hg --cwd a ci -A -m a
6 6 hg clone a b
7 7 echo b > b/b
8 8 hg --cwd b ci -A -m b
9 chmod 100 a/.hg
9 chmod 100 a/.hg/store
10 10 hg --cwd b push ../a
11 chmod 700 a/.hg
11 chmod 700 a/.hg/store
@@ -1,66 +1,66 b''
1 1 #!/bin/sh
2 2
3 3 # initial
4 4 hg init test-a
5 5 cd test-a
6 6 cat >test.txt <<"EOF"
7 7 1
8 8 2
9 9 3
10 10 EOF
11 11 hg add test.txt
12 12 hg commit -m "Initial" -d "1000000 0"
13 13
14 14 # clone
15 15 cd ..
16 16 hg clone test-a test-b
17 17
18 18 # change test-a
19 19 cd test-a
20 20 cat >test.txt <<"EOF"
21 21 one
22 22 two
23 23 three
24 24 EOF
25 25 hg commit -m "Numbers as words" -d "1000000 0"
26 26
27 27 # change test-b
28 28 cd ../test-b
29 29 cat >test.txt <<"EOF"
30 30 1
31 31 2.5
32 32 3
33 33 EOF
34 34 hg commit -m "2 -> 2.5" -d "1000000 0"
35 35
36 36 # now pull and merge from test-a
37 37 hg pull ../test-a
38 38 HGMERGE=merge hg merge
39 39 # resolve conflict
40 40 cat >test.txt <<"EOF"
41 41 one
42 42 two-point-five
43 43 three
44 44 EOF
45 45 rm -f *.orig
46 46 hg commit -m "Merge 1" -d "1000000 0"
47 47
48 48 # change test-a again
49 49 cd ../test-a
50 50 cat >test.txt <<"EOF"
51 51 one
52 52 two-point-one
53 53 three
54 54 EOF
55 55 hg commit -m "two -> two-point-one" -d "1000000 0"
56 56
57 57 # pull and merge from test-a again
58 58 cd ../test-b
59 59 hg pull ../test-a
60 60 HGMERGE=merge hg merge --debug
61 61
62 62 cat test.txt | sed "s% .*%%"
63 63
64 hg debugindex .hg/data/test.txt.i
64 hg debugindex .hg/store/data/test.txt.i
65 65
66 66 hg log
@@ -1,52 +1,52 b''
1 1 #!/bin/sh
2 2 #
3 3 # revlog.parseindex must be able to parse the index file even if
4 4 # an index entry is split between two 64k blocks. The ideal test
5 5 # would be to create an index file with inline data where
6 6 # 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
7 7 # the size of an index entry) and with an index entry starting right
8 8 # before the 64k block boundary, and try to read it.
9 9 #
10 10 # We approximate that by reducing the read buffer to 1 byte.
11 11 #
12 12
13 13 hg init a
14 14 cd a
15 15 echo abc > foo
16 16 hg add foo
17 17 hg commit -m 'add foo' -d '1000000 0'
18 18
19 19 echo >> foo
20 20 hg commit -m 'change foo' -d '1000001 0'
21 21 hg log -r 0:
22 22
23 23 cat >> test.py << EOF
24 24 from mercurial import changelog, util
25 25 from mercurial.node import *
26 26
27 27 class singlebyteread(object):
28 28 def __init__(self, real):
29 29 self.real = real
30 30
31 31 def read(self, size=-1):
32 32 if size == 65536:
33 33 size = 1
34 34 return self.real.read(size)
35 35
36 36 def __getattr__(self, key):
37 37 return getattr(self.real, key)
38 38
39 39 def opener(*args):
40 40 o = util.opener(*args)
41 41 def wrapper(*a):
42 42 f = o(*a)
43 43 return singlebyteread(f)
44 44 return wrapper
45 45
46 cl = changelog.changelog(opener('.hg'))
46 cl = changelog.changelog(opener('.hg/store'))
47 47 print cl.count(), 'revisions:'
48 48 for r in xrange(cl.count()):
49 49 print short(cl.node(r))
50 50 EOF
51 51
52 52 python test.py
@@ -1,15 +1,15 b''
1 1 #!/bin/sh
2 2
3 3 hg init
4 4 echo foo > a
5 5 hg add a
6 6 hg commit -m "1" -d "1000000 0"
7 7 hg verify
8 chmod -r .hg/data/a.i
8 chmod -r .hg/store/data/a.i
9 9 hg verify 2>/dev/null || echo verify failed
10 chmod +r .hg/data/a.i
10 chmod +r .hg/store/data/a.i
11 11 hg verify 2>/dev/null || echo verify failed
12 chmod -w .hg/data/a.i
12 chmod -w .hg/store/data/a.i
13 13 echo barber > a
14 14 hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
15 15
@@ -1,19 +1,19 b''
1 1 #!/bin/sh
2 2
3 3 mkdir a
4 4 cd a
5 5 hg init
6 6 echo foo > b
7 7 hg add b
8 8 hg ci -m "b" -d "1000000 0"
9 9
10 chmod -w .hg
10 chmod -w .hg/store
11 11
12 12 cd ..
13 13
14 14 hg clone a b
15 15
16 chmod +w a/.hg # let test clean up
16 chmod +w a/.hg/store # let test clean up
17 17
18 18 cd b
19 19 hg verify
@@ -1,61 +1,61 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 cat >>afile <<EOF
6 6 0
7 7 EOF
8 8 hg add afile
9 9 hg commit -m "0.0"
10 10 cat >>afile <<EOF
11 11 1
12 12 EOF
13 13 hg commit -m "0.1"
14 14 cat >>afile <<EOF
15 15 2
16 16 EOF
17 17 hg commit -m "0.2"
18 18 cat >>afile <<EOF
19 19 3
20 20 EOF
21 21 hg commit -m "0.3"
22 22 hg update -C 0
23 23 cat >>afile <<EOF
24 24 1
25 25 EOF
26 26 hg commit -m "1.1"
27 27 cat >>afile <<EOF
28 28 2
29 29 EOF
30 30 hg commit -m "1.2"
31 31 cat >fred <<EOF
32 32 a line
33 33 EOF
34 34 cat >>afile <<EOF
35 35 3
36 36 EOF
37 37 hg add fred
38 38 hg commit -m "1.3"
39 39 hg mv afile adifferentfile
40 40 hg commit -m "1.3m"
41 41 hg update -C 3
42 42 hg mv afile anotherfile
43 43 hg commit -m "0.3m"
44 hg debugindex .hg/data/afile.i
45 hg debugindex .hg/data/adifferentfile.i
46 hg debugindex .hg/data/anotherfile.i
47 hg debugindex .hg/data/fred.i
48 hg debugindex .hg/00manifest.i
44 hg debugindex .hg/store/data/afile.i
45 hg debugindex .hg/store/data/adifferentfile.i
46 hg debugindex .hg/store/data/anotherfile.i
47 hg debugindex .hg/store/data/fred.i
48 hg debugindex .hg/store/00manifest.i
49 49 hg verify
50 50 cd ..
51 51 for i in 0 1 2 3 4 5 6 7 8; do
52 52 mkdir test-"$i"
53 53 hg --cwd test-"$i" init
54 54 hg -R test push -r "$i" test-"$i"
55 55 cd test-"$i"
56 56 hg verify
57 57 cd ..
58 58 done
59 59 cd test-8
60 60 hg pull ../test-7
61 61 hg verify
@@ -1,27 +1,27 b''
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 echo "[merge]" >> .hg/hgrc
7 7 echo "followcopies = 1" >> .hg/hgrc
8 8 echo foo > a
9 9 echo foo > a2
10 10 hg add a a2
11 11 hg ci -m "start" -d "0 0"
12 12 hg mv a b
13 13 hg mv a2 b2
14 14 hg ci -m "rename" -d "0 0"
15 15 echo "checkout"
16 16 hg co 0
17 17 echo blahblah > a
18 18 echo blahblah > a2
19 19 hg mv a2 c2
20 20 hg ci -m "modify" -d "0 0"
21 21 echo "merge"
22 22 hg merge -y --debug
23 23 hg status -AC
24 24 cat b
25 25 hg ci -m "merge" -d "0 0"
26 hg debugindex .hg/data/b.i
26 hg debugindex .hg/store/data/b.i
27 27 hg debugrename b
\ No newline at end of file
@@ -1,7 +1,2 b''
1 changeset: 0:0acdaf898367
2 tag: tip
3 user: test
4 date: Mon Jan 12 13:46:40 1970 +0000
5 summary: test
6
1 abort: index 00changelog.i unknown format 2!
7 2 abort: requirement 'indoor-pool' not supported!
@@ -1,99 +1,100 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 SSH_CLIENT='127.0.0.1 1 2'
21 21 export SSH_CLIENT
22 22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 23 $2
24 24 EOF
25 25 chmod +x dummyssh
26 26
27 27 echo "# creating 'remote'"
28 28 hg init remote
29 29 cd remote
30 30 echo this > foo
31 hg ci -A -m "init" -d "1000000 0" foo
31 echo this > fooO
32 hg ci -A -m "init" -d "1000000 0" foo fooO
32 33 echo '[server]' > .hg/hgrc
33 34 echo 'uncompressed = True' >> .hg/hgrc
34 35 echo '[hooks]' >> .hg/hgrc
35 36 echo 'changegroup = echo changegroup in remote: u=$HG_URL >> ../dummylog' >> .hg/hgrc
36 37
37 38 cd ..
38 39
39 40 echo "# repo not found error"
40 41 hg clone -e ./dummyssh ssh://user@dummy/nonexistent local
41 42
42 43 echo "# clone remote via stream"
43 44 hg clone -e ./dummyssh --uncompressed ssh://user@dummy/remote local-stream 2>&1 | \
44 45 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
45 46 cd local-stream
46 47 hg verify
47 48 cd ..
48 49
49 50 echo "# clone remote via pull"
50 51 hg clone -e ./dummyssh ssh://user@dummy/remote local
51 52
52 53 echo "# verify"
53 54 cd local
54 55 hg verify
55 56
56 57 echo '[hooks]' >> .hg/hgrc
57 58 echo 'changegroup = echo changegroup in local: u=$HG_URL >> ../dummylog' >> .hg/hgrc
58 59
59 60 echo "# empty default pull"
60 61 hg paths
61 62 hg pull -e ../dummyssh
62 63
63 64 echo "# local change"
64 65 echo bleah > foo
65 66 hg ci -m "add" -d "1000000 0"
66 67
67 68 echo "# updating rc"
68 69 echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
69 70 echo "[ui]" >> .hg/hgrc
70 71 echo "ssh = ../dummyssh" >> .hg/hgrc
71 72
72 73 echo "# find outgoing"
73 74 hg out ssh://user@dummy/remote
74 75
75 76 echo "# find incoming on the remote side"
76 77 hg incoming -R ../remote -e ../dummyssh ssh://user@dummy/local
77 78
78 79 echo "# push"
79 80 hg push
80 81
81 82 cd ../remote
82 83
83 84 echo "# check remote tip"
84 85 hg tip
85 86 hg verify
86 87 hg cat -r tip foo
87 88
88 89 echo z > z
89 90 hg ci -A -m z -d '1000001 0' z
90 91
91 92 cd ../local
92 93 echo r > r
93 94 hg ci -A -m z -d '1000002 0' r
94 95
95 96 echo "# push should succeed"
96 97 hg push
97 98
98 99 cd ..
99 100 cat dummylog
@@ -1,99 +1,99 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 SSH_CLIENT='127.0.0.1 1 2'
21 21 export SSH_CLIENT
22 22 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
23 23 $2
24 24 EOF
25 25 chmod +x dummyssh
26 26
27 27 hg init remote
28 28 cd remote
29 29 echo "# creating 'remote'"
30 30 cat >>afile <<EOF
31 31 0
32 32 EOF
33 33 hg add afile
34 34 hg commit -m "0.0"
35 35 cat >>afile <<EOF
36 36 1
37 37 EOF
38 38 hg commit -m "0.1"
39 39 cat >>afile <<EOF
40 40 2
41 41 EOF
42 42 hg commit -m "0.2"
43 43 cat >>afile <<EOF
44 44 3
45 45 EOF
46 46 hg commit -m "0.3"
47 47 hg update -C 0
48 48 cat >>afile <<EOF
49 49 1
50 50 EOF
51 51 hg commit -m "1.1"
52 52 cat >>afile <<EOF
53 53 2
54 54 EOF
55 55 hg commit -m "1.2"
56 56 cat >fred <<EOF
57 57 a line
58 58 EOF
59 59 cat >>afile <<EOF
60 60 3
61 61 EOF
62 62 hg add fred
63 63 hg commit -m "1.3"
64 64 hg mv afile adifferentfile
65 65 hg commit -m "1.3m"
66 66 hg update -C 3
67 67 hg mv afile anotherfile
68 68 hg commit -m "0.3m"
69 hg debugindex .hg/data/afile.i
70 hg debugindex .hg/data/adifferentfile.i
71 hg debugindex .hg/data/anotherfile.i
72 hg debugindex .hg/data/fred.i
73 hg debugindex .hg/00manifest.i
69 hg debugindex .hg/store/data/afile.i
70 hg debugindex .hg/store/data/adifferentfile.i
71 hg debugindex .hg/store/data/anotherfile.i
72 hg debugindex .hg/store/data/fred.i
73 hg debugindex .hg/store/00manifest.i
74 74 hg verify
75 75 cd ..
76 76
77 77 echo "# clone remote via stream"
78 78 for i in 0 1 2 3 4 5 6 7 8; do
79 79 hg clone -e ./dummyssh --uncompressed -r "$i" ssh://user@dummy/remote test-"$i" 2>&1
80 80 if cd test-"$i"; then
81 81 hg verify
82 82 cd ..
83 83 fi
84 84 done
85 85 cd test-8
86 86 hg pull ../test-7
87 87 hg verify
88 88 cd ..
89 89 cd test-1
90 90 hg pull -e ../dummyssh -r 4 ssh://user@dummy/remote 2>&1
91 91 hg verify
92 92 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
93 93 cd ..
94 94 cd test-2
95 95 hg pull -e ../dummyssh -r 5 ssh://user@dummy/remote 2>&1
96 96 hg verify
97 97 hg pull -e ../dummyssh ssh://user@dummy/remote 2>&1
98 98 hg verify
99 99 cd ..
@@ -1,88 +1,88 b''
1 1 # creating 'remote'
2 2 # repo not found error
3 3 remote: abort: repository nonexistent not found!
4 4 abort: no suitable response from remote hg!
5 5 # clone remote via stream
6 6 streaming all changes
7 7 XXX files to transfer, XXX bytes of data
8 8 transferred XXX bytes in XXX seconds (XXX XB/sec)
9 9 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
10 10 checking changesets
11 11 checking manifests
12 12 crosschecking files in changesets and manifests
13 13 checking files
14 1 files, 1 changesets, 1 total revisions
14 2 files, 1 changesets, 2 total revisions
15 15 # clone remote via pull
16 16 requesting all changes
17 17 adding changesets
18 18 adding manifests
19 19 adding file changes
20 added 1 changesets with 1 changes to 1 files
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
20 added 1 changesets with 2 changes to 2 files
21 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 22 # verify
23 23 checking changesets
24 24 checking manifests
25 25 crosschecking files in changesets and manifests
26 26 checking files
27 1 files, 1 changesets, 1 total revisions
27 2 files, 1 changesets, 2 total revisions
28 28 # empty default pull
29 29 default = ssh://user@dummy/remote
30 30 pulling from ssh://user@dummy/remote
31 31 searching for changes
32 32 no changes found
33 33 # local change
34 34 # updating rc
35 35 # find outgoing
36 36 searching for changes
37 changeset: 1:c54836a570be
37 changeset: 1:572896fe480d
38 38 tag: tip
39 39 user: test
40 40 date: Mon Jan 12 13:46:40 1970 +0000
41 41 summary: add
42 42
43 43 # find incoming on the remote side
44 44 searching for changes
45 changeset: 1:c54836a570be
45 changeset: 1:572896fe480d
46 46 tag: tip
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: add
50 50
51 51 # push
52 52 pushing to ssh://user@dummy/remote
53 53 searching for changes
54 54 remote: adding changesets
55 55 remote: adding manifests
56 56 remote: adding file changes
57 57 remote: added 1 changesets with 1 changes to 1 files
58 58 # check remote tip
59 changeset: 1:c54836a570be
59 changeset: 1:572896fe480d
60 60 tag: tip
61 61 user: test
62 62 date: Mon Jan 12 13:46:40 1970 +0000
63 63 summary: add
64 64
65 65 checking changesets
66 66 checking manifests
67 67 crosschecking files in changesets and manifests
68 68 checking files
69 1 files, 2 changesets, 2 total revisions
69 2 files, 2 changesets, 3 total revisions
70 70 bleah
71 71 # push should succeed
72 72 pushing to ssh://user@dummy/remote
73 73 searching for changes
74 74 note: unsynced remote changes!
75 75 remote: adding changesets
76 76 remote: adding manifests
77 77 remote: adding file changes
78 78 remote: added 1 changesets with 1 changes to 1 files
79 79 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio 3: 4: 5:
80 80 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
81 81 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
82 82 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
83 83 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
84 84 Got arguments 1:user@dummy 2:hg -R local serve --stdio 3: 4: 5:
85 85 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
86 86 changegroup in remote: u=remote:ssh:127.0.0.1
87 87 Got arguments 1:user@dummy 2:hg -R remote serve --stdio 3: 4: 5:
88 88 changegroup in remote: u=remote:ssh:127.0.0.1