##// END OF EJS Templates
merge with brendan
Benoit Boissinot -
r3058:11e3396e merge default
parent child Browse files
Show More
@@ -0,0 +1,179 b''
1 # churn.py - create a graph showing who changed the most lines
2 #
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7 #
8 #
9 # Aliases map file format is simple one alias per line in the following
10 # format:
11 #
12 # <alias email> <actual email>
13
14 from mercurial.demandload import *
15 from mercurial.i18n import gettext as _
16 demandload(globals(), 'time sys signal os')
17 demandload(globals(), 'mercurial:hg,mdiff,fancyopts,commands,ui,util,templater,node')
18
19 def __gather(ui, repo, node1, node2):
20 def dirtywork(f, mmap1, mmap2):
21 lines = 0
22
23 to = mmap1 and repo.file(f).read(mmap1[f]) or None
24 tn = mmap2 and repo.file(f).read(mmap2[f]) or None
25
26 diff = mdiff.unidiff(to, "", tn, "", f).split("\n")
27
28 for line in diff:
29 if not line:
30 continue # skip EOF
31 if line.startswith(" "):
32 continue # context line
33 if line.startswith("--- ") or line.startswith("+++ "):
34 continue # beginning of diff
35 if line.startswith("@@ "):
36 continue # info line
37
38 # changed lines
39 lines += 1
40
41 return lines
42
43 ##
44
45 lines = 0
46
47 changes = repo.status(node1, node2, None, util.always)[:5]
48
49 modified, added, removed, deleted, unknown = changes
50
51 who = repo.changelog.read(node2)[1]
52 who = templater.email(who) # get the email of the person
53
54 mmap1 = repo.manifest.read(repo.changelog.read(node1)[0])
55 mmap2 = repo.manifest.read(repo.changelog.read(node2)[0])
56 for f in modified:
57 lines += dirtywork(f, mmap1, mmap2)
58
59 for f in added:
60 lines += dirtywork(f, None, mmap2)
61
62 for f in removed:
63 lines += dirtywork(f, mmap1, None)
64
65 for f in deleted:
66 lines += dirtywork(f, mmap1, mmap2)
67
68 for f in unknown:
69 lines += dirtywork(f, mmap1, mmap2)
70
71 return (who, lines)
72
73 def gather_stats(ui, repo, amap, revs=None, progress=False):
74 stats = {}
75
76 cl = repo.changelog
77
78 if not revs:
79 revs = range(0, cl.count())
80
81 nr_revs = len(revs)
82 cur_rev = 0
83
84 for rev in revs:
85 cur_rev += 1 # next revision
86
87 node2 = cl.node(rev)
88 node1 = cl.parents(node2)[0]
89
90 if cl.parents(node2)[1] != node.nullid:
91 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
92 continue
93
94 who, lines = __gather(ui, repo, node1, node2)
95
96 # remap the owner if possible
97 if amap.has_key(who):
98 ui.note("using '%s' alias for '%s'\n" % (amap[who], who))
99 who = amap[who]
100
101 if not stats.has_key(who):
102 stats[who] = 0
103 stats[who] += lines
104
105 ui.note("rev %d: %d lines by %s\n" % (rev, lines, who))
106
107 if progress:
108 if int(100.0*(cur_rev - 1)/nr_revs) < int(100.0*cur_rev/nr_revs):
109 ui.write("%d%%.." % (int(100.0*cur_rev/nr_revs),))
110 sys.stdout.flush()
111
112 if progress:
113 ui.write("done\n")
114 sys.stdout.flush()
115
116 return stats
117
118 def churn(ui, repo, **opts):
119 "Graphs the number of lines changed"
120
121 def pad(s, l):
122 if len(s) < l:
123 return s + " " * (l-len(s))
124 return s[0:l]
125
126 def graph(n, maximum, width, char):
127 n = int(n * width / float(maximum))
128
129 return char * (n)
130
131 def get_aliases(f):
132 aliases = {}
133
134 for l in f.readlines():
135 l = l.strip()
136 alias, actual = l.split(" ")
137 aliases[alias] = actual
138
139 return aliases
140
141 amap = {}
142 aliases = opts.get('aliases')
143 if aliases:
144 try:
145 f = open(aliases,"r")
146 except OSError, e:
147 print "Error: " + e
148 return
149
150 amap = get_aliases(f)
151 f.close()
152
153 revs = [int(r) for r in commands.revrange(ui, repo, opts['rev'])]
154 revs.sort()
155 stats = gather_stats(ui, repo, amap, revs, opts.get('progress'))
156
157 # make a list of tuples (name, lines) and sort it in descending order
158 ordered = stats.items()
159 ordered.sort(cmp=lambda x, y: cmp(y[1], x[1]))
160
161 maximum = ordered[0][1]
162
163 ui.note("Assuming 80 character terminal\n")
164 width = 80 - 1
165
166 for i in ordered:
167 person = i[0]
168 lines = i[1]
169 print "%s %6d %s" % (pad(person, 20), lines,
170 graph(lines, maximum, width - 20 - 1 - 6 - 2 - 2, '*'))
171
172 cmdtable = {
173 "churn":
174 (churn,
175 [('r', 'rev', [], _('limit statistics to the specified revisions')),
176 ('', 'aliases', '', _('file with email aliases')),
177 ('', 'progress', None, _('show progress'))],
178 'hg churn [-r revision range] [-a file] [--progress]'),
179 }
@@ -1,493 +1,508 b''
1 1 HGRC(5)
2 2 =======
3 3 Bryan O'Sullivan <bos@serpentine.com>
4 4
5 5 NAME
6 6 ----
7 7 hgrc - configuration files for Mercurial
8 8
9 9 SYNOPSIS
10 10 --------
11 11
12 12 The Mercurial system uses a set of configuration files to control
13 13 aspects of its behaviour.
14 14
15 15 FILES
16 16 -----
17 17
18 18 Mercurial reads configuration data from several files, if they exist.
19 19 The names of these files depend on the system on which Mercurial is
20 20 installed.
21 21
22 22 (Unix) <install-root>/etc/mercurial/hgrc.d/*.rc::
23 23 (Unix) <install-root>/etc/mercurial/hgrc::
24 24 Per-installation configuration files, searched for in the
25 25 directory where Mercurial is installed. For example, if installed
26 26 in /shared/tools, Mercurial will look in
27 27 /shared/tools/etc/mercurial/hgrc. Options in these files apply to
28 28 all Mercurial commands executed by any user in any directory.
29 29
30 30 (Unix) /etc/mercurial/hgrc.d/*.rc::
31 31 (Unix) /etc/mercurial/hgrc::
32 32 (Windows) C:\Mercurial\Mercurial.ini::
33 33 Per-system configuration files, for the system on which Mercurial
34 34 is running. Options in these files apply to all Mercurial
35 35 commands executed by any user in any directory. Options in these
36 36 files override per-installation options.
37 37
38 38 (Unix) $HOME/.hgrc::
39 39 (Windows) C:\Documents and Settings\USERNAME\Mercurial.ini::
40 40 (Windows) $HOME\Mercurial.ini::
41 41 Per-user configuration file, for the user running Mercurial.
42 42 Options in this file apply to all Mercurial commands executed by
43 43 any user in any directory. Options in this file override
44 44 per-installation and per-system options.
45 45 On Windows systems, one of these is chosen exclusively according
46 46 to definition of HOME environment variable.
47 47
48 48 (Unix, Windows) <repo>/.hg/hgrc::
49 49 Per-repository configuration options that only apply in a
50 50 particular repository. This file is not version-controlled, and
51 51 will not get transferred during a "clone" operation. Options in
52 52 this file override options in all other configuration files.
53 53 On Unix, this file is only read if it belongs to a trusted user
54 54 or to a trusted group.
55 55
56 56 SYNTAX
57 57 ------
58 58
59 59 A configuration file consists of sections, led by a "[section]" header
60 60 and followed by "name: value" entries; "name=value" is also accepted.
61 61
62 62 [spam]
63 63 eggs=ham
64 64 green=
65 65 eggs
66 66
67 67 Each line contains one entry. If the lines that follow are indented,
68 68 they are treated as continuations of that entry.
69 69
70 70 Leading whitespace is removed from values. Empty lines are skipped.
71 71
72 72 The optional values can contain format strings which refer to other
73 73 values in the same section, or values in a special DEFAULT section.
74 74
75 75 Lines beginning with "#" or ";" are ignored and may be used to provide
76 76 comments.
77 77
78 78 SECTIONS
79 79 --------
80 80
81 81 This section describes the different sections that may appear in a
82 82 Mercurial "hgrc" file, the purpose of each section, its possible
83 83 keys, and their possible values.
84 84
85 85 decode/encode::
86 86 Filters for transforming files on checkout/checkin. This would
87 87 typically be used for newline processing or other
88 88 localization/canonicalization of files.
89 89
90 90 Filters consist of a filter pattern followed by a filter command.
91 91 Filter patterns are globs by default, rooted at the repository
92 92 root. For example, to match any file ending in ".txt" in the root
93 93 directory only, use the pattern "*.txt". To match any file ending
94 94 in ".c" anywhere in the repository, use the pattern "**.c".
95 95
96 96 The filter command can start with a specifier, either "pipe:" or
97 97 "tempfile:". If no specifier is given, "pipe:" is used by default.
98 98
99 99 A "pipe:" command must accept data on stdin and return the
100 100 transformed data on stdout.
101 101
102 102 Pipe example:
103 103
104 104 [encode]
105 105 # uncompress gzip files on checkin to improve delta compression
106 106 # note: not necessarily a good idea, just an example
107 107 *.gz = pipe: gunzip
108 108
109 109 [decode]
110 110 # recompress gzip files when writing them to the working dir (we
111 111 # can safely omit "pipe:", because it's the default)
112 112 *.gz = gzip
113 113
114 114 A "tempfile:" command is a template. The string INFILE is replaced
115 115 with the name of a temporary file that contains the data to be
116 116 filtered by the command. The string OUTFILE is replaced with the
117 117 name of an empty temporary file, where the filtered data must be
118 118 written by the command.
119 119
120 120 NOTE: the tempfile mechanism is recommended for Windows systems,
121 121 where the standard shell I/O redirection operators often have
122 122 strange effects. In particular, if you are doing line ending
123 123 conversion on Windows using the popular dos2unix and unix2dos
124 124 programs, you *must* use the tempfile mechanism, as using pipes will
125 125 corrupt the contents of your files.
126 126
127 127 Tempfile example:
128 128
129 129 [encode]
130 130 # convert files to unix line ending conventions on checkin
131 131 **.txt = tempfile: dos2unix -n INFILE OUTFILE
132 132
133 133 [decode]
134 134 # convert files to windows line ending conventions when writing
135 135 # them to the working dir
136 136 **.txt = tempfile: unix2dos -n INFILE OUTFILE
137 137
138 defaults::
139 Use the [defaults] section to define command defaults, i.e. the
140 default options/arguments to pass to the specified commands.
141
142 The following example makes 'hg log' run in verbose mode, and
143 'hg status' show only the modified files, by default.
144
145 [defaults]
146 log = -v
147 status = -m
148
149 The actual commands, instead of their aliases, must be used when
150 defining command defaults. The command defaults will also be
151 applied to the aliases of the commands defined.
152
138 153 email::
139 154 Settings for extensions that send email messages.
140 155 from;;
141 156 Optional. Email address to use in "From" header and SMTP envelope
142 157 of outgoing messages.
143 158 to;;
144 159 Optional. Comma-separated list of recipients' email addresses.
145 160 cc;;
146 161 Optional. Comma-separated list of carbon copy recipients'
147 162 email addresses.
148 163 bcc;;
149 164 Optional. Comma-separated list of blind carbon copy
150 165 recipients' email addresses. Cannot be set interactively.
151 166 method;;
152 167 Optional. Method to use to send email messages. If value is
153 168 "smtp" (default), use SMTP (see section "[smtp]" for
154 169 configuration). Otherwise, use as name of program to run that
155 170 acts like sendmail (takes "-f" option for sender, list of
156 171 recipients on command line, message on stdin). Normally, setting
157 172 this to "sendmail" or "/usr/sbin/sendmail" is enough to use
158 173 sendmail to send messages.
159 174
160 175 Email example:
161 176
162 177 [email]
163 178 from = Joseph User <joe.user@example.com>
164 179 method = /usr/sbin/sendmail
165 180
166 181 extensions::
167 182 Mercurial has an extension mechanism for adding new features. To
168 183 enable an extension, create an entry for it in this section.
169 184
170 185 If you know that the extension is already in Python's search path,
171 186 you can give the name of the module, followed by "=", with nothing
172 187 after the "=".
173 188
174 189 Otherwise, give a name that you choose, followed by "=", followed by
175 190 the path to the ".py" file (including the file name extension) that
176 191 defines the extension.
177 192
178 193 Example for ~/.hgrc:
179 194
180 195 [extensions]
181 196 # (the mq extension will get loaded from mercurial's path)
182 197 hgext.mq =
183 198 # (this extension will get loaded from the file specified)
184 199 myfeature = ~/.hgext/myfeature.py
185 200
186 201 hooks::
187 202 Commands or Python functions that get automatically executed by
188 203 various actions such as starting or finishing a commit. Multiple
189 204 hooks can be run for the same action by appending a suffix to the
190 205 action. Overriding a site-wide hook can be done by changing its
191 206 value or setting it to an empty string.
192 207
193 208 Example .hg/hgrc:
194 209
195 210 [hooks]
196 211 # do not use the site-wide hook
197 212 incoming =
198 213 incoming.email = /my/email/hook
199 214 incoming.autobuild = /my/build/hook
200 215
201 216 Most hooks are run with environment variables set that give added
202 217 useful information. For each hook below, the environment variables
203 218 it is passed are listed with names of the form "$HG_foo".
204 219
205 220 changegroup;;
206 221 Run after a changegroup has been added via push, pull or
207 222 unbundle. ID of the first new changeset is in $HG_NODE. URL from
208 223 which changes came is in $HG_URL.
209 224 commit;;
210 225 Run after a changeset has been created in the local repository.
211 226 ID of the newly created changeset is in $HG_NODE. Parent
212 227 changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
213 228 incoming;;
214 229 Run after a changeset has been pulled, pushed, or unbundled into
215 230 the local repository. The ID of the newly arrived changeset is in
216 231 $HG_NODE. The URL that was the source of the changes is in $HG_URL.
217 232 outgoing;;
218 233 Run after sending changes from local repository to another. ID of
219 234 first changeset sent is in $HG_NODE. Source of operation is in
220 235 $HG_SOURCE; see "preoutgoing" hook for description.
221 236 prechangegroup;;
222 237 Run before a changegroup is added via push, pull or unbundle.
223 238 Exit status 0 allows the changegroup to proceed. Non-zero status
224 239 will cause the push, pull or unbundle to fail. URL from which
225 240 changes will come is in $HG_URL.
226 241 precommit;;
227 242 Run before starting a local commit. Exit status 0 allows the
228 243 commit to proceed. Non-zero status will cause the commit to fail.
229 244 Parent changeset IDs are in $HG_PARENT1 and $HG_PARENT2.
230 245 preoutgoing;;
231 246 Run before computing changes to send from the local repository to
232 247 another. Non-zero status will cause failure. This lets you
233 248 prevent pull over http or ssh. Also prevents against local pull,
234 249 push (outbound) or bundle commands, but not effective, since you
235 250 can just copy files instead then. Source of operation is in
236 251 $HG_SOURCE. If "serve", operation is happening on behalf of
237 252 remote ssh or http repository. If "push", "pull" or "bundle",
238 253 operation is happening on behalf of repository on same system.
239 254 pretag;;
240 255 Run before creating a tag. Exit status 0 allows the tag to be
241 256 created. Non-zero status will cause the tag to fail. ID of
242 257 changeset to tag is in $HG_NODE. Name of tag is in $HG_TAG. Tag
243 258 is local if $HG_LOCAL=1, in repo if $HG_LOCAL=0.
244 259 pretxnchangegroup;;
245 260 Run after a changegroup has been added via push, pull or unbundle,
246 261 but before the transaction has been committed. Changegroup is
247 262 visible to hook program. This lets you validate incoming changes
248 263 before accepting them. Passed the ID of the first new changeset
249 264 in $HG_NODE. Exit status 0 allows the transaction to commit.
250 265 Non-zero status will cause the transaction to be rolled back and
251 266 the push, pull or unbundle will fail. URL that was source of
252 267 changes is in $HG_URL.
253 268 pretxncommit;;
254 269 Run after a changeset has been created but the transaction not yet
255 270 committed. Changeset is visible to hook program. This lets you
256 271 validate commit message and changes. Exit status 0 allows the
257 272 commit to proceed. Non-zero status will cause the transaction to
258 273 be rolled back. ID of changeset is in $HG_NODE. Parent changeset
259 274 IDs are in $HG_PARENT1 and $HG_PARENT2.
260 275 preupdate;;
261 276 Run before updating the working directory. Exit status 0 allows
262 277 the update to proceed. Non-zero status will prevent the update.
263 278 Changeset ID of first new parent is in $HG_PARENT1. If merge, ID
264 279 of second new parent is in $HG_PARENT2.
265 280 tag;;
266 281 Run after a tag is created. ID of tagged changeset is in
267 282 $HG_NODE. Name of tag is in $HG_TAG. Tag is local if
268 283 $HG_LOCAL=1, in repo if $HG_LOCAL=0.
269 284 update;;
270 285 Run after updating the working directory. Changeset ID of first
271 286 new parent is in $HG_PARENT1. If merge, ID of second new parent
272 287 is in $HG_PARENT2. If update succeeded, $HG_ERROR=0. If update
273 288 failed (e.g. because conflicts not resolved), $HG_ERROR=1.
274 289
275 290 Note: In earlier releases, the names of hook environment variables
276 291 did not have a "HG_" prefix. The old unprefixed names are no longer
277 292 provided in the environment.
278 293
279 294 The syntax for Python hooks is as follows:
280 295
281 296 hookname = python:modulename.submodule.callable
282 297
283 298 Python hooks are run within the Mercurial process. Each hook is
284 299 called with at least three keyword arguments: a ui object (keyword
285 300 "ui"), a repository object (keyword "repo"), and a "hooktype"
286 301 keyword that tells what kind of hook is used. Arguments listed as
287 302 environment variables above are passed as keyword arguments, with no
288 303 "HG_" prefix, and names in lower case.
289 304
290 305 A Python hook must return a "true" value to succeed. Returning a
291 306 "false" value or raising an exception is treated as failure of the
292 307 hook.
293 308
294 309 http_proxy::
295 310 Used to access web-based Mercurial repositories through a HTTP
296 311 proxy.
297 312 host;;
298 313 Host name and (optional) port of the proxy server, for example
299 314 "myproxy:8000".
300 315 no;;
301 316 Optional. Comma-separated list of host names that should bypass
302 317 the proxy.
303 318 passwd;;
304 319 Optional. Password to authenticate with at the proxy server.
305 320 user;;
306 321 Optional. User name to authenticate with at the proxy server.
307 322
308 323 smtp::
309 324 Configuration for extensions that need to send email messages.
310 325 host;;
311 326 Host name of mail server, e.g. "mail.example.com".
312 327 port;;
313 328 Optional. Port to connect to on mail server. Default: 25.
314 329 tls;;
315 330 Optional. Whether to connect to mail server using TLS. True or
316 331 False. Default: False.
317 332 username;;
318 333 Optional. User name to authenticate to SMTP server with.
319 334 If username is specified, password must also be specified.
320 335 Default: none.
321 336 password;;
322 337 Optional. Password to authenticate to SMTP server with.
323 338 If username is specified, password must also be specified.
324 339 Default: none.
325 340 local_hostname;;
326 341 Optional. It's the hostname that the sender can use to identify itself
327 342 to the MTA.
328 343
329 344 paths::
330 345 Assigns symbolic names to repositories. The left side is the
331 346 symbolic name, and the right gives the directory or URL that is the
332 347 location of the repository. Default paths can be declared by
333 348 setting the following entries.
334 349 default;;
335 350 Directory or URL to use when pulling if no source is specified.
336 351 Default is set to repository from which the current repository
337 352 was cloned.
338 353 default-push;;
339 354 Optional. Directory or URL to use when pushing if no destination
340 355 is specified.
341 356
342 357 server::
343 358 Controls generic server settings.
344 359 uncompressed;;
345 360 Whether to allow clients to clone a repo using the uncompressed
346 361 streaming protocol. This transfers about 40% more data than a
347 362 regular clone, but uses less memory and CPU on both server and
348 363 client. Over a LAN (100Mbps or better) or a very fast WAN, an
349 364 uncompressed streaming clone is a lot faster (~10x) than a regular
350 365 clone. Over most WAN connections (anything slower than about
351 366 6Mbps), uncompressed streaming is slower, because of the extra
352 367 data transfer overhead. Default is False.
353 368
354 369 trusted::
355 370 Mercurial will only read the .hg/hgrc file from a repository if
356 371 it belongs to a trusted user or to a trusted group. This section
357 372 specifies what users and groups are trusted. To trust everybody,
358 373 list a user or a group with name "*".
359 374 users;;
360 375 Comma-separated list of trusted users.
361 376 groups;;
362 377 Comma-separated list of trusted groups.
363 378
364 379 ui::
365 380 User interface controls.
366 381 debug;;
367 382 Print debugging information. True or False. Default is False.
368 383 editor;;
369 384 The editor to use during a commit. Default is $EDITOR or "vi".
370 385 ignore;;
371 386 A file to read per-user ignore patterns from. This file should be in
372 387 the same format as a repository-wide .hgignore file. This option
373 388 supports hook syntax, so if you want to specify multiple ignore
374 389 files, you can do so by setting something like
375 390 "ignore.other = ~/.hgignore2". For details of the ignore file
376 391 format, see the hgignore(5) man page.
377 392 interactive;;
378 393 Allow prompting the user. True or False. Default is True.
379 394 logtemplate;;
380 395 Template string for commands that print changesets.
381 396 style;;
382 397 Name of style to use for command output.
383 398 merge;;
384 399 The conflict resolution program to use during a manual merge.
385 400 Default is "hgmerge".
386 401 quiet;;
387 402 Reduce the amount of output printed. True or False. Default is False.
388 403 remotecmd;;
389 404 remote command to use for clone/push/pull operations. Default is 'hg'.
390 405 ssh;;
391 406 command to use for SSH connections. Default is 'ssh'.
392 407 strict;;
393 408 Require exact command names, instead of allowing unambiguous
394 409 abbreviations. True or False. Default is False.
395 410 timeout;;
396 411 The timeout used when a lock is held (in seconds), a negative value
397 412 means no timeout. Default is 600.
398 413 username;;
399 414 The committer of a changeset created when running "commit".
400 415 Typically a person's name and email address, e.g. "Fred Widget
401 416 <fred@example.com>". Default is $EMAIL or username@hostname, unless
402 417 username is set to an empty string, which enforces specifying the
403 418 username manually.
404 419 verbose;;
405 420 Increase the amount of output printed. True or False. Default is False.
406 421
407 422
408 423 web::
409 424 Web interface configuration.
410 425 accesslog;;
411 426 Where to output the access log. Default is stdout.
412 427 address;;
413 428 Interface address to bind to. Default is all.
414 429 allow_archive;;
415 430 List of archive format (bz2, gz, zip) allowed for downloading.
416 431 Default is empty.
417 432 allowbz2;;
418 433 (DEPRECATED) Whether to allow .tar.bz2 downloading of repo revisions.
419 434 Default is false.
420 435 allowgz;;
421 436 (DEPRECATED) Whether to allow .tar.gz downloading of repo revisions.
422 437 Default is false.
423 438 allowpull;;
424 439 Whether to allow pulling from the repository. Default is true.
425 440 allow_push;;
426 441 Whether to allow pushing to the repository. If empty or not set,
427 442 push is not allowed. If the special value "*", any remote user
428 443 can push, including unauthenticated users. Otherwise, the remote
429 444 user must have been authenticated, and the authenticated user name
430 445 must be present in this list (separated by whitespace or ",").
431 446 The contents of the allow_push list are examined after the
432 447 deny_push list.
433 448 allowzip;;
434 449 (DEPRECATED) Whether to allow .zip downloading of repo revisions.
435 450 Default is false. This feature creates temporary files.
436 451 baseurl;;
437 452 Base URL to use when publishing URLs in other locations, so
438 453 third-party tools like email notification hooks can construct URLs.
439 454 Example: "http://hgserver/repos/"
440 455 contact;;
441 456 Name or email address of the person in charge of the repository.
442 457 Default is "unknown".
443 458 deny_push;;
444 459 Whether to deny pushing to the repository. If empty or not set,
445 460 push is not denied. If the special value "*", all remote users
446 461 are denied push. Otherwise, unauthenticated users are all denied,
447 462 and any authenticated user name present in this list (separated by
448 463 whitespace or ",") is also denied. The contents of the deny_push
449 464 list are examined before the allow_push list.
450 465 description;;
451 466 Textual description of the repository's purpose or contents.
452 467 Default is "unknown".
453 468 errorlog;;
454 469 Where to output the error log. Default is stderr.
455 470 ipv6;;
456 471 Whether to use IPv6. Default is false.
457 472 name;;
458 473 Repository name to use in the web interface. Default is current
459 474 working directory.
460 475 maxchanges;;
461 476 Maximum number of changes to list on the changelog. Default is 10.
462 477 maxfiles;;
463 478 Maximum number of files to list per changeset. Default is 10.
464 479 port;;
465 480 Port to listen on. Default is 8000.
466 481 push_ssl;;
467 482 Whether to require that inbound pushes be transported over SSL to
468 483 prevent password sniffing. Default is true.
469 484 stripes;;
470 485 How many lines a "zebra stripe" should span in multiline output.
471 486 Default is 1; set to 0 to disable.
472 487 style;;
473 488 Which template map style to use.
474 489 templates;;
475 490 Where to find the HTML templates. Default is install path.
476 491
477 492
478 493 AUTHOR
479 494 ------
480 495 Bryan O'Sullivan <bos@serpentine.com>.
481 496
482 497 Mercurial was written by Matt Mackall <mpm@selenic.com>.
483 498
484 499 SEE ALSO
485 500 --------
486 501 hg(1), hgignore(5)
487 502
488 503 COPYING
489 504 -------
490 505 This manual page is copyright 2005 Bryan O'Sullivan.
491 506 Mercurial is copyright 2005, 2006 Matt Mackall.
492 507 Free use of this software is granted under the terms of the GNU General
493 508 Public License (GPL).
@@ -1,236 +1,231 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(path and util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path=None, create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106
107 107 def localpath(path):
108 108 if path.startswith('file://'):
109 109 return path[7:]
110 110 if path.startswith('file:'):
111 111 return path[5:]
112 112 return path
113 113
114 114 dest = localpath(dest)
115 115 source = localpath(source)
116 116
117 117 if os.path.exists(dest):
118 118 raise util.Abort(_("destination '%s' already exists"), dest)
119 119
120 120 class DirCleanup(object):
121 121 def __init__(self, dir_):
122 122 self.rmtree = shutil.rmtree
123 123 self.dir_ = dir_
124 124 def close(self):
125 125 self.dir_ = None
126 126 def __del__(self):
127 127 if self.dir_:
128 128 self.rmtree(self.dir_, True)
129 129
130 dest_repo = None
131 try:
132 dest_repo = repository(ui, dest)
133 raise util.Abort(_("destination '%s' already exists." % dest))
134 except RepoError:
135 130 dest_repo = repository(ui, dest, create=True)
136 131
137 132 dest_path = None
138 133 dir_cleanup = None
139 134 if dest_repo.local():
140 135 dest_path = os.path.realpath(dest_repo.root)
141 136 dir_cleanup = DirCleanup(dest_path)
142 137
143 138 abspath = source
144 139 copy = False
145 140 if src_repo.local() and dest_repo.local():
146 141 abspath = os.path.abspath(source)
147 142 copy = not pull and not rev
148 143
149 144 src_lock, dest_lock = None, None
150 145 if copy:
151 146 try:
152 147 # we use a lock here because if we race with commit, we
153 148 # can end up with extra data in the cloned revlogs that's
154 149 # not pointed to by changesets, thus causing verify to
155 150 # fail
156 151 src_lock = src_repo.lock()
157 152 except lock.LockException:
158 153 copy = False
159 154
160 155 if copy:
161 156 # we lock here to avoid premature writing to the target
162 157 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
163 158
164 159 # we need to remove the (empty) data dir in dest so copyfiles
165 160 # can do its work
166 161 os.rmdir(os.path.join(dest_path, ".hg", "data"))
167 162 files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
168 163 for f in files.split():
169 164 src = os.path.join(source, ".hg", f)
170 165 dst = os.path.join(dest_path, ".hg", f)
171 166 try:
172 167 util.copyfiles(src, dst)
173 168 except OSError, inst:
174 169 if inst.errno != errno.ENOENT:
175 170 raise
176 171
177 172 # we need to re-init the repo after manually copying the data
178 173 # into it
179 174 dest_repo = repository(ui, dest)
180 175
181 176 else:
182 177 revs = None
183 178 if rev:
184 179 if not src_repo.local():
185 180 raise util.Abort(_("clone by revision not supported yet "
186 181 "for remote repositories"))
187 182 revs = [src_repo.lookup(r) for r in rev]
188 183
189 184 if dest_repo.local():
190 185 dest_repo.clone(src_repo, heads=revs, stream=stream)
191 186 elif src_repo.local():
192 187 src_repo.push(dest_repo, revs=revs)
193 188 else:
194 189 raise util.Abort(_("clone from remote to remote not supported"))
195 190
196 191 if src_lock:
197 192 src_lock.release()
198 193
199 194 if dest_repo.local():
200 195 fp = dest_repo.opener("hgrc", "w", text=True)
201 196 fp.write("[paths]\n")
202 197 fp.write("default = %s\n" % abspath)
203 198 fp.close()
204 199
205 200 if dest_lock:
206 201 dest_lock.release()
207 202
208 203 if update:
209 204 _merge.update(dest_repo, dest_repo.changelog.tip())
210 205 if dir_cleanup:
211 206 dir_cleanup.close()
212 207
213 208 return src_repo, dest_repo
214 209
215 210 def update(repo, node):
216 211 """update the working directory to node, merging linear changes"""
217 212 return _merge.update(repo, node)
218 213
219 214 def clean(repo, node, wlock=None, show_stats=True):
220 215 """forcibly switch the working directory to node, clobbering changes"""
221 216 return _merge.update(repo, node, force=True, wlock=wlock,
222 217 show_stats=show_stats)
223 218
224 219 def merge(repo, node, force=None, remind=True, wlock=None):
225 220 """branch merge with node, resolving changes"""
226 221 return _merge.update(repo, node, branchmerge=True, force=force,
227 222 remind=remind, wlock=wlock)
228 223
229 224 def revert(repo, node, choose, wlock):
230 225 """revert changes to revision in node without updating dirstate"""
231 226 return _merge.update(repo, node, force=True, partial=choose,
232 227 show_stats=False, wlock=wlock)
233 228
234 229 def verify(repo):
235 230 """verify the consistency of a repository"""
236 231 return _verify.verify(repo)
@@ -1,1749 +1,1751 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("no repo found"))
31 31 path = p
32 32 self.path = os.path.join(path, ".hg")
33 33
34 if not create and not os.path.isdir(self.path):
34 if not os.path.isdir(self.path):
35 if create:
36 if not os.path.exists(path):
37 os.mkdir(path)
38 os.mkdir(self.path)
39 os.mkdir(self.join("data"))
40 else:
35 41 raise repo.RepoError(_("repository %s not found") % path)
42 elif create:
43 raise repo.RepoError(_("repository %s already exists") % path)
36 44
37 45 self.root = os.path.abspath(path)
38 46 self.origroot = path
39 47 self.ui = ui.ui(parentui=parentui)
40 48 self.opener = util.opener(self.path)
41 49 self.wopener = util.opener(self.root)
42 50
43 51 try:
44 52 self.ui.readconfig(self.join("hgrc"), self.root)
45 53 except IOError:
46 54 pass
47 55
48 56 v = self.ui.revlogopts
49 57 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
50 58 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
51 59 fl = v.get('flags', None)
52 60 flags = 0
53 61 if fl != None:
54 62 for x in fl.split():
55 63 flags |= revlog.flagstr(x)
56 64 elif self.revlogv1:
57 65 flags = revlog.REVLOG_DEFAULT_FLAGS
58 66
59 67 v = self.revlogversion | flags
60 68 self.manifest = manifest.manifest(self.opener, v)
61 69 self.changelog = changelog.changelog(self.opener, v)
62 70
63 71 # the changelog might not have the inline index flag
64 72 # on. If the format of the changelog is the same as found in
65 73 # .hgrc, apply any flags found in the .hgrc as well.
66 74 # Otherwise, just version from the changelog
67 75 v = self.changelog.version
68 76 if v == self.revlogversion:
69 77 v |= flags
70 78 self.revlogversion = v
71 79
72 80 self.tagscache = None
73 81 self.nodetagscache = None
74 82 self.encodepats = None
75 83 self.decodepats = None
76 84 self.transhandle = None
77 85
78 if create:
79 if not os.path.exists(path):
80 os.mkdir(path)
81 os.mkdir(self.path)
82 os.mkdir(self.join("data"))
83
84 86 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85 87
86 88 def url(self):
87 89 return 'file:' + self.root
88 90
89 91 def hook(self, name, throw=False, **args):
90 92 def callhook(hname, funcname):
91 93 '''call python hook. hook is callable object, looked up as
92 94 name in python module. if callable returns "true", hook
93 95 fails, else passes. if hook raises exception, treated as
94 96 hook failure. exception propagates if throw is "true".
95 97
96 98 reason for "true" meaning "hook failed" is so that
97 99 unmodified commands (e.g. mercurial.commands.update) can
98 100 be run as hooks without wrappers to convert return values.'''
99 101
100 102 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
101 103 d = funcname.rfind('.')
102 104 if d == -1:
103 105 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
104 106 % (hname, funcname))
105 107 modname = funcname[:d]
106 108 try:
107 109 obj = __import__(modname)
108 110 except ImportError:
109 111 try:
110 112 # extensions are loaded with hgext_ prefix
111 113 obj = __import__("hgext_%s" % modname)
112 114 except ImportError:
113 115 raise util.Abort(_('%s hook is invalid '
114 116 '(import of "%s" failed)') %
115 117 (hname, modname))
116 118 try:
117 119 for p in funcname.split('.')[1:]:
118 120 obj = getattr(obj, p)
119 121 except AttributeError, err:
120 122 raise util.Abort(_('%s hook is invalid '
121 123 '("%s" is not defined)') %
122 124 (hname, funcname))
123 125 if not callable(obj):
124 126 raise util.Abort(_('%s hook is invalid '
125 127 '("%s" is not callable)') %
126 128 (hname, funcname))
127 129 try:
128 130 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
129 131 except (KeyboardInterrupt, util.SignalInterrupt):
130 132 raise
131 133 except Exception, exc:
132 134 if isinstance(exc, util.Abort):
133 135 self.ui.warn(_('error: %s hook failed: %s\n') %
134 136 (hname, exc.args[0] % exc.args[1:]))
135 137 else:
136 138 self.ui.warn(_('error: %s hook raised an exception: '
137 139 '%s\n') % (hname, exc))
138 140 if throw:
139 141 raise
140 142 self.ui.print_exc()
141 143 return True
142 144 if r:
143 145 if throw:
144 146 raise util.Abort(_('%s hook failed') % hname)
145 147 self.ui.warn(_('warning: %s hook failed\n') % hname)
146 148 return r
147 149
148 150 def runhook(name, cmd):
149 151 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
150 152 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
151 153 r = util.system(cmd, environ=env, cwd=self.root)
152 154 if r:
153 155 desc, r = util.explain_exit(r)
154 156 if throw:
155 157 raise util.Abort(_('%s hook %s') % (name, desc))
156 158 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
157 159 return r
158 160
159 161 r = False
160 162 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
161 163 if hname.split(".", 1)[0] == name and cmd]
162 164 hooks.sort()
163 165 for hname, cmd in hooks:
164 166 if cmd.startswith('python:'):
165 167 r = callhook(hname, cmd[7:].strip()) or r
166 168 else:
167 169 r = runhook(hname, cmd) or r
168 170 return r
169 171
170 172 tag_disallowed = ':\r\n'
171 173
172 174 def tag(self, name, node, message, local, user, date):
173 175 '''tag a revision with a symbolic name.
174 176
175 177 if local is True, the tag is stored in a per-repository file.
176 178 otherwise, it is stored in the .hgtags file, and a new
177 179 changeset is committed with the change.
178 180
179 181 keyword arguments:
180 182
181 183 local: whether to store tag in non-version-controlled file
182 184 (default False)
183 185
184 186 message: commit message to use if committing
185 187
186 188 user: name of user to use if committing
187 189
188 190 date: date tuple to use if committing'''
189 191
190 192 for c in self.tag_disallowed:
191 193 if c in name:
192 194 raise util.Abort(_('%r cannot be used in a tag name') % c)
193 195
194 196 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
195 197
196 198 if local:
197 199 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
198 200 self.hook('tag', node=hex(node), tag=name, local=local)
199 201 return
200 202
201 203 for x in self.status()[:5]:
202 204 if '.hgtags' in x:
203 205 raise util.Abort(_('working copy of .hgtags is changed '
204 206 '(please commit .hgtags manually)'))
205 207
206 208 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
207 209 if self.dirstate.state('.hgtags') == '?':
208 210 self.add(['.hgtags'])
209 211
210 212 self.commit(['.hgtags'], message, user, date)
211 213 self.hook('tag', node=hex(node), tag=name, local=local)
212 214
213 215 def tags(self):
214 216 '''return a mapping of tag to node'''
215 217 if not self.tagscache:
216 218 self.tagscache = {}
217 219
218 220 def parsetag(line, context):
219 221 if not line:
220 222 return
221 223 s = l.split(" ", 1)
222 224 if len(s) != 2:
223 225 self.ui.warn(_("%s: cannot parse entry\n") % context)
224 226 return
225 227 node, key = s
226 228 key = key.strip()
227 229 try:
228 230 bin_n = bin(node)
229 231 except TypeError:
230 232 self.ui.warn(_("%s: node '%s' is not well formed\n") %
231 233 (context, node))
232 234 return
233 235 if bin_n not in self.changelog.nodemap:
234 236 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
235 237 (context, key))
236 238 return
237 239 self.tagscache[key] = bin_n
238 240
239 241 # read the tags file from each head, ending with the tip,
240 242 # and add each tag found to the map, with "newer" ones
241 243 # taking precedence
242 244 heads = self.heads()
243 245 heads.reverse()
244 246 fl = self.file(".hgtags")
245 247 for node in heads:
246 248 change = self.changelog.read(node)
247 249 rev = self.changelog.rev(node)
248 250 fn, ff = self.manifest.find(change[0], '.hgtags')
249 251 if fn is None: continue
250 252 count = 0
251 253 for l in fl.read(fn).splitlines():
252 254 count += 1
253 255 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
254 256 (rev, short(node), count))
255 257 try:
256 258 f = self.opener("localtags")
257 259 count = 0
258 260 for l in f:
259 261 count += 1
260 262 parsetag(l, _("localtags, line %d") % count)
261 263 except IOError:
262 264 pass
263 265
264 266 self.tagscache['tip'] = self.changelog.tip()
265 267
266 268 return self.tagscache
267 269
268 270 def tagslist(self):
269 271 '''return a list of tags ordered by revision'''
270 272 l = []
271 273 for t, n in self.tags().items():
272 274 try:
273 275 r = self.changelog.rev(n)
274 276 except:
275 277 r = -2 # sort to the beginning of the list if unknown
276 278 l.append((r, t, n))
277 279 l.sort()
278 280 return [(t, n) for r, t, n in l]
279 281
280 282 def nodetags(self, node):
281 283 '''return the tags associated with a node'''
282 284 if not self.nodetagscache:
283 285 self.nodetagscache = {}
284 286 for t, n in self.tags().items():
285 287 self.nodetagscache.setdefault(n, []).append(t)
286 288 return self.nodetagscache.get(node, [])
287 289
288 290 def lookup(self, key):
289 291 try:
290 292 return self.tags()[key]
291 293 except KeyError:
292 294 if key == '.':
293 295 key = self.dirstate.parents()[0]
294 296 if key == nullid:
295 297 raise repo.RepoError(_("no revision checked out"))
296 298 try:
297 299 return self.changelog.lookup(key)
298 300 except:
299 301 raise repo.RepoError(_("unknown revision '%s'") % key)
300 302
301 303 def dev(self):
302 304 return os.lstat(self.path).st_dev
303 305
304 306 def local(self):
305 307 return True
306 308
307 309 def join(self, f):
308 310 return os.path.join(self.path, f)
309 311
310 312 def wjoin(self, f):
311 313 return os.path.join(self.root, f)
312 314
313 315 def file(self, f):
314 316 if f[0] == '/':
315 317 f = f[1:]
316 318 return filelog.filelog(self.opener, f, self.revlogversion)
317 319
318 320 def changectx(self, changeid):
319 321 return context.changectx(self, changeid)
320 322
321 323 def filectx(self, path, changeid=None, fileid=None):
322 324 """changeid can be a changeset revision, node, or tag.
323 325 fileid can be a file revision or node."""
324 326 return context.filectx(self, path, changeid, fileid)
325 327
326 328 def getcwd(self):
327 329 return self.dirstate.getcwd()
328 330
329 331 def wfile(self, f, mode='r'):
330 332 return self.wopener(f, mode)
331 333
332 334 def wread(self, filename):
333 335 if self.encodepats == None:
334 336 l = []
335 337 for pat, cmd in self.ui.configitems("encode"):
336 338 mf = util.matcher(self.root, "", [pat], [], [])[1]
337 339 l.append((mf, cmd))
338 340 self.encodepats = l
339 341
340 342 data = self.wopener(filename, 'r').read()
341 343
342 344 for mf, cmd in self.encodepats:
343 345 if mf(filename):
344 346 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
345 347 data = util.filter(data, cmd)
346 348 break
347 349
348 350 return data
349 351
350 352 def wwrite(self, filename, data, fd=None):
351 353 if self.decodepats == None:
352 354 l = []
353 355 for pat, cmd in self.ui.configitems("decode"):
354 356 mf = util.matcher(self.root, "", [pat], [], [])[1]
355 357 l.append((mf, cmd))
356 358 self.decodepats = l
357 359
358 360 for mf, cmd in self.decodepats:
359 361 if mf(filename):
360 362 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
361 363 data = util.filter(data, cmd)
362 364 break
363 365
364 366 if fd:
365 367 return fd.write(data)
366 368 return self.wopener(filename, 'w').write(data)
367 369
368 370 def transaction(self):
369 371 tr = self.transhandle
370 372 if tr != None and tr.running():
371 373 return tr.nest()
372 374
373 375 # save dirstate for rollback
374 376 try:
375 377 ds = self.opener("dirstate").read()
376 378 except IOError:
377 379 ds = ""
378 380 self.opener("journal.dirstate", "w").write(ds)
379 381
380 382 tr = transaction.transaction(self.ui.warn, self.opener,
381 383 self.join("journal"),
382 384 aftertrans(self.path))
383 385 self.transhandle = tr
384 386 return tr
385 387
386 388 def recover(self):
387 389 l = self.lock()
388 390 if os.path.exists(self.join("journal")):
389 391 self.ui.status(_("rolling back interrupted transaction\n"))
390 392 transaction.rollback(self.opener, self.join("journal"))
391 393 self.reload()
392 394 return True
393 395 else:
394 396 self.ui.warn(_("no interrupted transaction available\n"))
395 397 return False
396 398
397 399 def rollback(self, wlock=None):
398 400 if not wlock:
399 401 wlock = self.wlock()
400 402 l = self.lock()
401 403 if os.path.exists(self.join("undo")):
402 404 self.ui.status(_("rolling back last transaction\n"))
403 405 transaction.rollback(self.opener, self.join("undo"))
404 406 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
405 407 self.reload()
406 408 self.wreload()
407 409 else:
408 410 self.ui.warn(_("no rollback information available\n"))
409 411
410 412 def wreload(self):
411 413 self.dirstate.read()
412 414
413 415 def reload(self):
414 416 self.changelog.load()
415 417 self.manifest.load()
416 418 self.tagscache = None
417 419 self.nodetagscache = None
418 420
419 421 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
420 422 desc=None):
421 423 try:
422 424 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
423 425 except lock.LockHeld, inst:
424 426 if not wait:
425 427 raise
426 428 self.ui.warn(_("waiting for lock on %s held by %s\n") %
427 429 (desc, inst.args[0]))
428 430 # default to 600 seconds timeout
429 431 l = lock.lock(self.join(lockname),
430 432 int(self.ui.config("ui", "timeout") or 600),
431 433 releasefn, desc=desc)
432 434 if acquirefn:
433 435 acquirefn()
434 436 return l
435 437
436 438 def lock(self, wait=1):
437 439 return self.do_lock("lock", wait, acquirefn=self.reload,
438 440 desc=_('repository %s') % self.origroot)
439 441
440 442 def wlock(self, wait=1):
441 443 return self.do_lock("wlock", wait, self.dirstate.write,
442 444 self.wreload,
443 445 desc=_('working directory of %s') % self.origroot)
444 446
445 447 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
446 448 "determine whether a new filenode is needed"
447 449 fp1 = manifest1.get(filename, nullid)
448 450 fp2 = manifest2.get(filename, nullid)
449 451
450 452 if fp2 != nullid:
451 453 # is one parent an ancestor of the other?
452 454 fpa = filelog.ancestor(fp1, fp2)
453 455 if fpa == fp1:
454 456 fp1, fp2 = fp2, nullid
455 457 elif fpa == fp2:
456 458 fp2 = nullid
457 459
458 460 # is the file unmodified from the parent? report existing entry
459 461 if fp2 == nullid and text == filelog.read(fp1):
460 462 return (fp1, None, None)
461 463
462 464 return (None, fp1, fp2)
463 465
464 466 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
465 467 orig_parent = self.dirstate.parents()[0] or nullid
466 468 p1 = p1 or self.dirstate.parents()[0] or nullid
467 469 p2 = p2 or self.dirstate.parents()[1] or nullid
468 470 c1 = self.changelog.read(p1)
469 471 c2 = self.changelog.read(p2)
470 472 m1 = self.manifest.read(c1[0]).copy()
471 473 m2 = self.manifest.read(c2[0])
472 474 changed = []
473 475
474 476 if orig_parent == p1:
475 477 update_dirstate = 1
476 478 else:
477 479 update_dirstate = 0
478 480
479 481 if not wlock:
480 482 wlock = self.wlock()
481 483 l = self.lock()
482 484 tr = self.transaction()
483 485 linkrev = self.changelog.count()
484 486 for f in files:
485 487 try:
486 488 t = self.wread(f)
487 489 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
488 490 r = self.file(f)
489 491
490 492 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
491 493 if entry:
492 494 m1[f] = entry
493 495 continue
494 496
495 497 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
496 498 changed.append(f)
497 499 if update_dirstate:
498 500 self.dirstate.update([f], "n")
499 501 except IOError:
500 502 try:
501 503 del m1[f]
502 504 if update_dirstate:
503 505 self.dirstate.forget([f])
504 506 except:
505 507 # deleted from p2?
506 508 pass
507 509
508 510 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
509 511 user = user or self.ui.username()
510 512 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
511 513 tr.close()
512 514 if update_dirstate:
513 515 self.dirstate.setparents(n, nullid)
514 516
515 517 def commit(self, files=None, text="", user=None, date=None,
516 518 match=util.always, force=False, lock=None, wlock=None,
517 519 force_editor=False):
518 520 commit = []
519 521 remove = []
520 522 changed = []
521 523
522 524 if files:
523 525 for f in files:
524 526 s = self.dirstate.state(f)
525 527 if s in 'nmai':
526 528 commit.append(f)
527 529 elif s == 'r':
528 530 remove.append(f)
529 531 else:
530 532 self.ui.warn(_("%s not tracked!\n") % f)
531 533 else:
532 534 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
533 535 commit = modified + added
534 536 remove = removed
535 537
536 538 p1, p2 = self.dirstate.parents()
537 539 c1 = self.changelog.read(p1)
538 540 c2 = self.changelog.read(p2)
539 541 m1 = self.manifest.read(c1[0]).copy()
540 542 m2 = self.manifest.read(c2[0])
541 543
542 544 if not commit and not remove and not force and p2 == nullid:
543 545 self.ui.status(_("nothing changed\n"))
544 546 return None
545 547
546 548 xp1 = hex(p1)
547 549 if p2 == nullid: xp2 = ''
548 550 else: xp2 = hex(p2)
549 551
550 552 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
551 553
552 554 if not wlock:
553 555 wlock = self.wlock()
554 556 if not lock:
555 557 lock = self.lock()
556 558 tr = self.transaction()
557 559
558 560 # check in files
559 561 new = {}
560 562 linkrev = self.changelog.count()
561 563 commit.sort()
562 564 for f in commit:
563 565 self.ui.note(f + "\n")
564 566 try:
565 567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
566 568 t = self.wread(f)
567 569 except IOError:
568 570 self.ui.warn(_("trouble committing %s!\n") % f)
569 571 raise
570 572
571 573 r = self.file(f)
572 574
573 575 meta = {}
574 576 cp = self.dirstate.copied(f)
575 577 if cp:
576 578 meta["copy"] = cp
577 579 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
578 580 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
579 581 fp1, fp2 = nullid, nullid
580 582 else:
581 583 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
582 584 if entry:
583 585 new[f] = entry
584 586 continue
585 587
586 588 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
587 589 # remember what we've added so that we can later calculate
588 590 # the files to pull from a set of changesets
589 591 changed.append(f)
590 592
591 593 # update manifest
592 594 m1.update(new)
593 595 for f in remove:
594 596 if f in m1:
595 597 del m1[f]
596 598 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
597 599 (new, remove))
598 600
599 601 # add changeset
600 602 new = new.keys()
601 603 new.sort()
602 604
603 605 user = user or self.ui.username()
604 606 if not text or force_editor:
605 607 edittext = []
606 608 if text:
607 609 edittext.append(text)
608 610 edittext.append("")
609 611 if p2 != nullid:
610 612 edittext.append("HG: branch merge")
611 613 edittext.extend(["HG: changed %s" % f for f in changed])
612 614 edittext.extend(["HG: removed %s" % f for f in remove])
613 615 if not changed and not remove:
614 616 edittext.append("HG: no files changed")
615 617 edittext.append("")
616 618 # run editor in the repository root
617 619 olddir = os.getcwd()
618 620 os.chdir(self.root)
619 621 text = self.ui.edit("\n".join(edittext), user)
620 622 os.chdir(olddir)
621 623
622 624 lines = [line.rstrip() for line in text.rstrip().splitlines()]
623 625 while lines and not lines[0]:
624 626 del lines[0]
625 627 if not lines:
626 628 return None
627 629 text = '\n'.join(lines)
628 630 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
629 631 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
630 632 parent2=xp2)
631 633 tr.close()
632 634
633 635 self.dirstate.setparents(n)
634 636 self.dirstate.update(new, "n")
635 637 self.dirstate.forget(remove)
636 638
637 639 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
638 640 return n
639 641
640 642 def walk(self, node=None, files=[], match=util.always, badmatch=None):
641 643 if node:
642 644 fdict = dict.fromkeys(files)
643 645 for fn in self.manifest.read(self.changelog.read(node)[0]):
644 646 for ffn in fdict:
645 647 # match if the file is the exact name or a directory
646 648 if ffn == fn or fn.startswith("%s/" % ffn):
647 649 del fdict[ffn]
648 650 break
649 651 if match(fn):
650 652 yield 'm', fn
651 653 for fn in fdict:
652 654 if badmatch and badmatch(fn):
653 655 if match(fn):
654 656 yield 'b', fn
655 657 else:
656 658 self.ui.warn(_('%s: No such file in rev %s\n') % (
657 659 util.pathto(self.getcwd(), fn), short(node)))
658 660 else:
659 661 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
660 662 yield src, fn
661 663
662 664 def status(self, node1=None, node2=None, files=[], match=util.always,
663 665 wlock=None, list_ignored=False, list_clean=False):
664 666 """return status of files between two nodes or node and working directory
665 667
666 668 If node1 is None, use the first dirstate parent instead.
667 669 If node2 is None, compare node1 with working directory.
668 670 """
669 671
670 672 def fcmp(fn, mf):
671 673 t1 = self.wread(fn)
672 674 return self.file(fn).cmp(mf.get(fn, nullid), t1)
673 675
674 676 def mfmatches(node):
675 677 change = self.changelog.read(node)
676 678 mf = dict(self.manifest.read(change[0]))
677 679 for fn in mf.keys():
678 680 if not match(fn):
679 681 del mf[fn]
680 682 return mf
681 683
682 684 modified, added, removed, deleted, unknown = [], [], [], [], []
683 685 ignored, clean = [], []
684 686
685 687 compareworking = False
686 688 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
687 689 compareworking = True
688 690
689 691 if not compareworking:
690 692 # read the manifest from node1 before the manifest from node2,
691 693 # so that we'll hit the manifest cache if we're going through
692 694 # all the revisions in parent->child order.
693 695 mf1 = mfmatches(node1)
694 696
695 697 # are we comparing the working directory?
696 698 if not node2:
697 699 if not wlock:
698 700 try:
699 701 wlock = self.wlock(wait=0)
700 702 except lock.LockException:
701 703 wlock = None
702 704 (lookup, modified, added, removed, deleted, unknown,
703 705 ignored, clean) = self.dirstate.status(files, match,
704 706 list_ignored, list_clean)
705 707
706 708 # are we comparing working dir against its parent?
707 709 if compareworking:
708 710 if lookup:
709 711 # do a full compare of any files that might have changed
710 712 mf2 = mfmatches(self.dirstate.parents()[0])
711 713 for f in lookup:
712 714 if fcmp(f, mf2):
713 715 modified.append(f)
714 716 else:
715 717 clean.append(f)
716 718 if wlock is not None:
717 719 self.dirstate.update([f], "n")
718 720 else:
719 721 # we are comparing working dir against non-parent
720 722 # generate a pseudo-manifest for the working dir
721 723 mf2 = mfmatches(self.dirstate.parents()[0])
722 724 for f in lookup + modified + added:
723 725 mf2[f] = ""
724 726 for f in removed:
725 727 if f in mf2:
726 728 del mf2[f]
727 729 else:
728 730 # we are comparing two revisions
729 731 mf2 = mfmatches(node2)
730 732
731 733 if not compareworking:
732 734 # flush lists from dirstate before comparing manifests
733 735 modified, added, clean = [], [], []
734 736
735 737 # make sure to sort the files so we talk to the disk in a
736 738 # reasonable order
737 739 mf2keys = mf2.keys()
738 740 mf2keys.sort()
739 741 for fn in mf2keys:
740 742 if mf1.has_key(fn):
741 743 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
742 744 modified.append(fn)
743 745 elif list_clean:
744 746 clean.append(fn)
745 747 del mf1[fn]
746 748 else:
747 749 added.append(fn)
748 750
749 751 removed = mf1.keys()
750 752
751 753 # sort and return results:
752 754 for l in modified, added, removed, deleted, unknown, ignored, clean:
753 755 l.sort()
754 756 return (modified, added, removed, deleted, unknown, ignored, clean)
755 757
756 758 def add(self, list, wlock=None):
757 759 if not wlock:
758 760 wlock = self.wlock()
759 761 for f in list:
760 762 p = self.wjoin(f)
761 763 if not os.path.exists(p):
762 764 self.ui.warn(_("%s does not exist!\n") % f)
763 765 elif not os.path.isfile(p):
764 766 self.ui.warn(_("%s not added: only files supported currently\n")
765 767 % f)
766 768 elif self.dirstate.state(f) in 'an':
767 769 self.ui.warn(_("%s already tracked!\n") % f)
768 770 else:
769 771 self.dirstate.update([f], "a")
770 772
771 773 def forget(self, list, wlock=None):
772 774 if not wlock:
773 775 wlock = self.wlock()
774 776 for f in list:
775 777 if self.dirstate.state(f) not in 'ai':
776 778 self.ui.warn(_("%s not added!\n") % f)
777 779 else:
778 780 self.dirstate.forget([f])
779 781
780 782 def remove(self, list, unlink=False, wlock=None):
781 783 if unlink:
782 784 for f in list:
783 785 try:
784 786 util.unlink(self.wjoin(f))
785 787 except OSError, inst:
786 788 if inst.errno != errno.ENOENT:
787 789 raise
788 790 if not wlock:
789 791 wlock = self.wlock()
790 792 for f in list:
791 793 p = self.wjoin(f)
792 794 if os.path.exists(p):
793 795 self.ui.warn(_("%s still exists!\n") % f)
794 796 elif self.dirstate.state(f) == 'a':
795 797 self.dirstate.forget([f])
796 798 elif f not in self.dirstate:
797 799 self.ui.warn(_("%s not tracked!\n") % f)
798 800 else:
799 801 self.dirstate.update([f], "r")
800 802
801 803 def undelete(self, list, wlock=None):
802 804 p = self.dirstate.parents()[0]
803 805 mn = self.changelog.read(p)[0]
804 806 m = self.manifest.read(mn)
805 807 if not wlock:
806 808 wlock = self.wlock()
807 809 for f in list:
808 810 if self.dirstate.state(f) not in "r":
809 811 self.ui.warn("%s not removed!\n" % f)
810 812 else:
811 813 t = self.file(f).read(m[f])
812 814 self.wwrite(f, t)
813 815 util.set_exec(self.wjoin(f), m.execf(f))
814 816 self.dirstate.update([f], "n")
815 817
816 818 def copy(self, source, dest, wlock=None):
817 819 p = self.wjoin(dest)
818 820 if not os.path.exists(p):
819 821 self.ui.warn(_("%s does not exist!\n") % dest)
820 822 elif not os.path.isfile(p):
821 823 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
822 824 else:
823 825 if not wlock:
824 826 wlock = self.wlock()
825 827 if self.dirstate.state(dest) == '?':
826 828 self.dirstate.update([dest], "a")
827 829 self.dirstate.copy(source, dest)
828 830
829 831 def heads(self, start=None):
830 832 heads = self.changelog.heads(start)
831 833 # sort the output in rev descending order
832 834 heads = [(-self.changelog.rev(h), h) for h in heads]
833 835 heads.sort()
834 836 return [n for (r, n) in heads]
835 837
836 838 # branchlookup returns a dict giving a list of branches for
837 839 # each head. A branch is defined as the tag of a node or
838 840 # the branch of the node's parents. If a node has multiple
839 841 # branch tags, tags are eliminated if they are visible from other
840 842 # branch tags.
841 843 #
842 844 # So, for this graph: a->b->c->d->e
843 845 # \ /
844 846 # aa -----/
845 847 # a has tag 2.6.12
846 848 # d has tag 2.6.13
847 849 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
848 850 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
849 851 # from the list.
850 852 #
851 853 # It is possible that more than one head will have the same branch tag.
852 854 # callers need to check the result for multiple heads under the same
853 855 # branch tag if that is a problem for them (ie checkout of a specific
854 856 # branch).
855 857 #
856 858 # passing in a specific branch will limit the depth of the search
857 859 # through the parents. It won't limit the branches returned in the
858 860 # result though.
859 861 def branchlookup(self, heads=None, branch=None):
860 862 if not heads:
861 863 heads = self.heads()
862 864 headt = [ h for h in heads ]
863 865 chlog = self.changelog
864 866 branches = {}
865 867 merges = []
866 868 seenmerge = {}
867 869
868 870 # traverse the tree once for each head, recording in the branches
869 871 # dict which tags are visible from this head. The branches
870 872 # dict also records which tags are visible from each tag
871 873 # while we traverse.
872 874 while headt or merges:
873 875 if merges:
874 876 n, found = merges.pop()
875 877 visit = [n]
876 878 else:
877 879 h = headt.pop()
878 880 visit = [h]
879 881 found = [h]
880 882 seen = {}
881 883 while visit:
882 884 n = visit.pop()
883 885 if n in seen:
884 886 continue
885 887 pp = chlog.parents(n)
886 888 tags = self.nodetags(n)
887 889 if tags:
888 890 for x in tags:
889 891 if x == 'tip':
890 892 continue
891 893 for f in found:
892 894 branches.setdefault(f, {})[n] = 1
893 895 branches.setdefault(n, {})[n] = 1
894 896 break
895 897 if n not in found:
896 898 found.append(n)
897 899 if branch in tags:
898 900 continue
899 901 seen[n] = 1
900 902 if pp[1] != nullid and n not in seenmerge:
901 903 merges.append((pp[1], [x for x in found]))
902 904 seenmerge[n] = 1
903 905 if pp[0] != nullid:
904 906 visit.append(pp[0])
905 907 # traverse the branches dict, eliminating branch tags from each
906 908 # head that are visible from another branch tag for that head.
907 909 out = {}
908 910 viscache = {}
909 911 for h in heads:
910 912 def visible(node):
911 913 if node in viscache:
912 914 return viscache[node]
913 915 ret = {}
914 916 visit = [node]
915 917 while visit:
916 918 x = visit.pop()
917 919 if x in viscache:
918 920 ret.update(viscache[x])
919 921 elif x not in ret:
920 922 ret[x] = 1
921 923 if x in branches:
922 924 visit[len(visit):] = branches[x].keys()
923 925 viscache[node] = ret
924 926 return ret
925 927 if h not in branches:
926 928 continue
927 929 # O(n^2), but somewhat limited. This only searches the
928 930 # tags visible from a specific head, not all the tags in the
929 931 # whole repo.
930 932 for b in branches[h]:
931 933 vis = False
932 934 for bb in branches[h].keys():
933 935 if b != bb:
934 936 if b in visible(bb):
935 937 vis = True
936 938 break
937 939 if not vis:
938 940 l = out.setdefault(h, [])
939 941 l[len(l):] = self.nodetags(b)
940 942 return out
941 943
942 944 def branches(self, nodes):
943 945 if not nodes:
944 946 nodes = [self.changelog.tip()]
945 947 b = []
946 948 for n in nodes:
947 949 t = n
948 950 while 1:
949 951 p = self.changelog.parents(n)
950 952 if p[1] != nullid or p[0] == nullid:
951 953 b.append((t, n, p[0], p[1]))
952 954 break
953 955 n = p[0]
954 956 return b
955 957
956 958 def between(self, pairs):
957 959 r = []
958 960
959 961 for top, bottom in pairs:
960 962 n, l, i = top, [], 0
961 963 f = 1
962 964
963 965 while n != bottom:
964 966 p = self.changelog.parents(n)[0]
965 967 if i == f:
966 968 l.append(n)
967 969 f = f * 2
968 970 n = p
969 971 i += 1
970 972
971 973 r.append(l)
972 974
973 975 return r
974 976
975 977 def findincoming(self, remote, base=None, heads=None, force=False):
976 978 """Return list of roots of the subsets of missing nodes from remote
977 979
978 980 If base dict is specified, assume that these nodes and their parents
979 981 exist on the remote side and that no child of a node of base exists
980 982 in both remote and self.
981 983 Furthermore base will be updated to include the nodes that exists
982 984 in self and remote but no children exists in self and remote.
983 985 If a list of heads is specified, return only nodes which are heads
984 986 or ancestors of these heads.
985 987
986 988 All the ancestors of base are in self and in remote.
987 989 All the descendants of the list returned are missing in self.
988 990 (and so we know that the rest of the nodes are missing in remote, see
989 991 outgoing)
990 992 """
991 993 m = self.changelog.nodemap
992 994 search = []
993 995 fetch = {}
994 996 seen = {}
995 997 seenbranch = {}
996 998 if base == None:
997 999 base = {}
998 1000
999 1001 if not heads:
1000 1002 heads = remote.heads()
1001 1003
1002 1004 if self.changelog.tip() == nullid:
1003 1005 base[nullid] = 1
1004 1006 if heads != [nullid]:
1005 1007 return [nullid]
1006 1008 return []
1007 1009
1008 1010 # assume we're closer to the tip than the root
1009 1011 # and start by examining the heads
1010 1012 self.ui.status(_("searching for changes\n"))
1011 1013
1012 1014 unknown = []
1013 1015 for h in heads:
1014 1016 if h not in m:
1015 1017 unknown.append(h)
1016 1018 else:
1017 1019 base[h] = 1
1018 1020
1019 1021 if not unknown:
1020 1022 return []
1021 1023
1022 1024 req = dict.fromkeys(unknown)
1023 1025 reqcnt = 0
1024 1026
1025 1027 # search through remote branches
1026 1028 # a 'branch' here is a linear segment of history, with four parts:
1027 1029 # head, root, first parent, second parent
1028 1030 # (a branch always has two parents (or none) by definition)
1029 1031 unknown = remote.branches(unknown)
1030 1032 while unknown:
1031 1033 r = []
1032 1034 while unknown:
1033 1035 n = unknown.pop(0)
1034 1036 if n[0] in seen:
1035 1037 continue
1036 1038
1037 1039 self.ui.debug(_("examining %s:%s\n")
1038 1040 % (short(n[0]), short(n[1])))
1039 1041 if n[0] == nullid: # found the end of the branch
1040 1042 pass
1041 1043 elif n in seenbranch:
1042 1044 self.ui.debug(_("branch already found\n"))
1043 1045 continue
1044 1046 elif n[1] and n[1] in m: # do we know the base?
1045 1047 self.ui.debug(_("found incomplete branch %s:%s\n")
1046 1048 % (short(n[0]), short(n[1])))
1047 1049 search.append(n) # schedule branch range for scanning
1048 1050 seenbranch[n] = 1
1049 1051 else:
1050 1052 if n[1] not in seen and n[1] not in fetch:
1051 1053 if n[2] in m and n[3] in m:
1052 1054 self.ui.debug(_("found new changeset %s\n") %
1053 1055 short(n[1]))
1054 1056 fetch[n[1]] = 1 # earliest unknown
1055 1057 for p in n[2:4]:
1056 1058 if p in m:
1057 1059 base[p] = 1 # latest known
1058 1060
1059 1061 for p in n[2:4]:
1060 1062 if p not in req and p not in m:
1061 1063 r.append(p)
1062 1064 req[p] = 1
1063 1065 seen[n[0]] = 1
1064 1066
1065 1067 if r:
1066 1068 reqcnt += 1
1067 1069 self.ui.debug(_("request %d: %s\n") %
1068 1070 (reqcnt, " ".join(map(short, r))))
1069 1071 for p in range(0, len(r), 10):
1070 1072 for b in remote.branches(r[p:p+10]):
1071 1073 self.ui.debug(_("received %s:%s\n") %
1072 1074 (short(b[0]), short(b[1])))
1073 1075 unknown.append(b)
1074 1076
1075 1077 # do binary search on the branches we found
1076 1078 while search:
1077 1079 n = search.pop(0)
1078 1080 reqcnt += 1
1079 1081 l = remote.between([(n[0], n[1])])[0]
1080 1082 l.append(n[1])
1081 1083 p = n[0]
1082 1084 f = 1
1083 1085 for i in l:
1084 1086 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1085 1087 if i in m:
1086 1088 if f <= 2:
1087 1089 self.ui.debug(_("found new branch changeset %s\n") %
1088 1090 short(p))
1089 1091 fetch[p] = 1
1090 1092 base[i] = 1
1091 1093 else:
1092 1094 self.ui.debug(_("narrowed branch search to %s:%s\n")
1093 1095 % (short(p), short(i)))
1094 1096 search.append((p, i))
1095 1097 break
1096 1098 p, f = i, f * 2
1097 1099
1098 1100 # sanity check our fetch list
1099 1101 for f in fetch.keys():
1100 1102 if f in m:
1101 1103 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1102 1104
1103 1105 if base.keys() == [nullid]:
1104 1106 if force:
1105 1107 self.ui.warn(_("warning: repository is unrelated\n"))
1106 1108 else:
1107 1109 raise util.Abort(_("repository is unrelated"))
1108 1110
1109 1111 self.ui.debug(_("found new changesets starting at ") +
1110 1112 " ".join([short(f) for f in fetch]) + "\n")
1111 1113
1112 1114 self.ui.debug(_("%d total queries\n") % reqcnt)
1113 1115
1114 1116 return fetch.keys()
1115 1117
1116 1118 def findoutgoing(self, remote, base=None, heads=None, force=False):
1117 1119 """Return list of nodes that are roots of subsets not in remote
1118 1120
1119 1121 If base dict is specified, assume that these nodes and their parents
1120 1122 exist on the remote side.
1121 1123 If a list of heads is specified, return only nodes which are heads
1122 1124 or ancestors of these heads, and return a second element which
1123 1125 contains all remote heads which get new children.
1124 1126 """
1125 1127 if base == None:
1126 1128 base = {}
1127 1129 self.findincoming(remote, base, heads, force=force)
1128 1130
1129 1131 self.ui.debug(_("common changesets up to ")
1130 1132 + " ".join(map(short, base.keys())) + "\n")
1131 1133
1132 1134 remain = dict.fromkeys(self.changelog.nodemap)
1133 1135
1134 1136 # prune everything remote has from the tree
1135 1137 del remain[nullid]
1136 1138 remove = base.keys()
1137 1139 while remove:
1138 1140 n = remove.pop(0)
1139 1141 if n in remain:
1140 1142 del remain[n]
1141 1143 for p in self.changelog.parents(n):
1142 1144 remove.append(p)
1143 1145
1144 1146 # find every node whose parents have been pruned
1145 1147 subset = []
1146 1148 # find every remote head that will get new children
1147 1149 updated_heads = {}
1148 1150 for n in remain:
1149 1151 p1, p2 = self.changelog.parents(n)
1150 1152 if p1 not in remain and p2 not in remain:
1151 1153 subset.append(n)
1152 1154 if heads:
1153 1155 if p1 in heads:
1154 1156 updated_heads[p1] = True
1155 1157 if p2 in heads:
1156 1158 updated_heads[p2] = True
1157 1159
1158 1160 # this is the set of all roots we have to push
1159 1161 if heads:
1160 1162 return subset, updated_heads.keys()
1161 1163 else:
1162 1164 return subset
1163 1165
1164 1166 def pull(self, remote, heads=None, force=False, lock=None):
1165 1167 mylock = False
1166 1168 if not lock:
1167 1169 lock = self.lock()
1168 1170 mylock = True
1169 1171
1170 1172 try:
1171 1173 fetch = self.findincoming(remote, force=force)
1172 1174 if fetch == [nullid]:
1173 1175 self.ui.status(_("requesting all changes\n"))
1174 1176
1175 1177 if not fetch:
1176 1178 self.ui.status(_("no changes found\n"))
1177 1179 return 0
1178 1180
1179 1181 if heads is None:
1180 1182 cg = remote.changegroup(fetch, 'pull')
1181 1183 else:
1182 1184 cg = remote.changegroupsubset(fetch, heads, 'pull')
1183 1185 return self.addchangegroup(cg, 'pull', remote.url())
1184 1186 finally:
1185 1187 if mylock:
1186 1188 lock.release()
1187 1189
1188 1190 def push(self, remote, force=False, revs=None):
1189 1191 # there are two ways to push to remote repo:
1190 1192 #
1191 1193 # addchangegroup assumes local user can lock remote
1192 1194 # repo (local filesystem, old ssh servers).
1193 1195 #
1194 1196 # unbundle assumes local user cannot lock remote repo (new ssh
1195 1197 # servers, http servers).
1196 1198
1197 1199 if remote.capable('unbundle'):
1198 1200 return self.push_unbundle(remote, force, revs)
1199 1201 return self.push_addchangegroup(remote, force, revs)
1200 1202
1201 1203 def prepush(self, remote, force, revs):
1202 1204 base = {}
1203 1205 remote_heads = remote.heads()
1204 1206 inc = self.findincoming(remote, base, remote_heads, force=force)
1205 1207 if not force and inc:
1206 1208 self.ui.warn(_("abort: unsynced remote changes!\n"))
1207 1209 self.ui.status(_("(did you forget to sync?"
1208 1210 " use push -f to force)\n"))
1209 1211 return None, 1
1210 1212
1211 1213 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1212 1214 if revs is not None:
1213 1215 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1214 1216 else:
1215 1217 bases, heads = update, self.changelog.heads()
1216 1218
1217 1219 if not bases:
1218 1220 self.ui.status(_("no changes found\n"))
1219 1221 return None, 1
1220 1222 elif not force:
1221 1223 # FIXME we don't properly detect creation of new heads
1222 1224 # in the push -r case, assume the user knows what he's doing
1223 1225 if not revs and len(remote_heads) < len(heads) \
1224 1226 and remote_heads != [nullid]:
1225 1227 self.ui.warn(_("abort: push creates new remote branches!\n"))
1226 1228 self.ui.status(_("(did you forget to merge?"
1227 1229 " use push -f to force)\n"))
1228 1230 return None, 1
1229 1231
1230 1232 if revs is None:
1231 1233 cg = self.changegroup(update, 'push')
1232 1234 else:
1233 1235 cg = self.changegroupsubset(update, revs, 'push')
1234 1236 return cg, remote_heads
1235 1237
1236 1238 def push_addchangegroup(self, remote, force, revs):
1237 1239 lock = remote.lock()
1238 1240
1239 1241 ret = self.prepush(remote, force, revs)
1240 1242 if ret[0] is not None:
1241 1243 cg, remote_heads = ret
1242 1244 return remote.addchangegroup(cg, 'push', self.url())
1243 1245 return ret[1]
1244 1246
1245 1247 def push_unbundle(self, remote, force, revs):
1246 1248 # local repo finds heads on server, finds out what revs it
1247 1249 # must push. once revs transferred, if server finds it has
1248 1250 # different heads (someone else won commit/push race), server
1249 1251 # aborts.
1250 1252
1251 1253 ret = self.prepush(remote, force, revs)
1252 1254 if ret[0] is not None:
1253 1255 cg, remote_heads = ret
1254 1256 if force: remote_heads = ['force']
1255 1257 return remote.unbundle(cg, remote_heads, 'push')
1256 1258 return ret[1]
1257 1259
1258 1260 def changegroupsubset(self, bases, heads, source):
1259 1261 """This function generates a changegroup consisting of all the nodes
1260 1262 that are descendents of any of the bases, and ancestors of any of
1261 1263 the heads.
1262 1264
1263 1265 It is fairly complex as determining which filenodes and which
1264 1266 manifest nodes need to be included for the changeset to be complete
1265 1267 is non-trivial.
1266 1268
1267 1269 Another wrinkle is doing the reverse, figuring out which changeset in
1268 1270 the changegroup a particular filenode or manifestnode belongs to."""
1269 1271
1270 1272 self.hook('preoutgoing', throw=True, source=source)
1271 1273
1272 1274 # Set up some initial variables
1273 1275 # Make it easy to refer to self.changelog
1274 1276 cl = self.changelog
1275 1277 # msng is short for missing - compute the list of changesets in this
1276 1278 # changegroup.
1277 1279 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1278 1280 # Some bases may turn out to be superfluous, and some heads may be
1279 1281 # too. nodesbetween will return the minimal set of bases and heads
1280 1282 # necessary to re-create the changegroup.
1281 1283
1282 1284 # Known heads are the list of heads that it is assumed the recipient
1283 1285 # of this changegroup will know about.
1284 1286 knownheads = {}
1285 1287 # We assume that all parents of bases are known heads.
1286 1288 for n in bases:
1287 1289 for p in cl.parents(n):
1288 1290 if p != nullid:
1289 1291 knownheads[p] = 1
1290 1292 knownheads = knownheads.keys()
1291 1293 if knownheads:
1292 1294 # Now that we know what heads are known, we can compute which
1293 1295 # changesets are known. The recipient must know about all
1294 1296 # changesets required to reach the known heads from the null
1295 1297 # changeset.
1296 1298 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1297 1299 junk = None
1298 1300 # Transform the list into an ersatz set.
1299 1301 has_cl_set = dict.fromkeys(has_cl_set)
1300 1302 else:
1301 1303 # If there were no known heads, the recipient cannot be assumed to
1302 1304 # know about any changesets.
1303 1305 has_cl_set = {}
1304 1306
1305 1307 # Make it easy to refer to self.manifest
1306 1308 mnfst = self.manifest
1307 1309 # We don't know which manifests are missing yet
1308 1310 msng_mnfst_set = {}
1309 1311 # Nor do we know which filenodes are missing.
1310 1312 msng_filenode_set = {}
1311 1313
1312 1314 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1313 1315 junk = None
1314 1316
1315 1317 # A changeset always belongs to itself, so the changenode lookup
1316 1318 # function for a changenode is identity.
1317 1319 def identity(x):
1318 1320 return x
1319 1321
1320 1322 # A function generating function. Sets up an environment for the
1321 1323 # inner function.
1322 1324 def cmp_by_rev_func(revlog):
1323 1325 # Compare two nodes by their revision number in the environment's
1324 1326 # revision history. Since the revision number both represents the
1325 1327 # most efficient order to read the nodes in, and represents a
1326 1328 # topological sorting of the nodes, this function is often useful.
1327 1329 def cmp_by_rev(a, b):
1328 1330 return cmp(revlog.rev(a), revlog.rev(b))
1329 1331 return cmp_by_rev
1330 1332
1331 1333 # If we determine that a particular file or manifest node must be a
1332 1334 # node that the recipient of the changegroup will already have, we can
1333 1335 # also assume the recipient will have all the parents. This function
1334 1336 # prunes them from the set of missing nodes.
1335 1337 def prune_parents(revlog, hasset, msngset):
1336 1338 haslst = hasset.keys()
1337 1339 haslst.sort(cmp_by_rev_func(revlog))
1338 1340 for node in haslst:
1339 1341 parentlst = [p for p in revlog.parents(node) if p != nullid]
1340 1342 while parentlst:
1341 1343 n = parentlst.pop()
1342 1344 if n not in hasset:
1343 1345 hasset[n] = 1
1344 1346 p = [p for p in revlog.parents(n) if p != nullid]
1345 1347 parentlst.extend(p)
1346 1348 for n in hasset:
1347 1349 msngset.pop(n, None)
1348 1350
1349 1351 # This is a function generating function used to set up an environment
1350 1352 # for the inner function to execute in.
1351 1353 def manifest_and_file_collector(changedfileset):
1352 1354 # This is an information gathering function that gathers
1353 1355 # information from each changeset node that goes out as part of
1354 1356 # the changegroup. The information gathered is a list of which
1355 1357 # manifest nodes are potentially required (the recipient may
1356 1358 # already have them) and total list of all files which were
1357 1359 # changed in any changeset in the changegroup.
1358 1360 #
1359 1361 # We also remember the first changenode we saw any manifest
1360 1362 # referenced by so we can later determine which changenode 'owns'
1361 1363 # the manifest.
1362 1364 def collect_manifests_and_files(clnode):
1363 1365 c = cl.read(clnode)
1364 1366 for f in c[3]:
1365 1367 # This is to make sure we only have one instance of each
1366 1368 # filename string for each filename.
1367 1369 changedfileset.setdefault(f, f)
1368 1370 msng_mnfst_set.setdefault(c[0], clnode)
1369 1371 return collect_manifests_and_files
1370 1372
1371 1373 # Figure out which manifest nodes (of the ones we think might be part
1372 1374 # of the changegroup) the recipient must know about and remove them
1373 1375 # from the changegroup.
1374 1376 def prune_manifests():
1375 1377 has_mnfst_set = {}
1376 1378 for n in msng_mnfst_set:
1377 1379 # If a 'missing' manifest thinks it belongs to a changenode
1378 1380 # the recipient is assumed to have, obviously the recipient
1379 1381 # must have that manifest.
1380 1382 linknode = cl.node(mnfst.linkrev(n))
1381 1383 if linknode in has_cl_set:
1382 1384 has_mnfst_set[n] = 1
1383 1385 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1384 1386
1385 1387 # Use the information collected in collect_manifests_and_files to say
1386 1388 # which changenode any manifestnode belongs to.
1387 1389 def lookup_manifest_link(mnfstnode):
1388 1390 return msng_mnfst_set[mnfstnode]
1389 1391
1390 1392 # A function generating function that sets up the initial environment
1391 1393 # the inner function.
1392 1394 def filenode_collector(changedfiles):
1393 1395 next_rev = [0]
1394 1396 # This gathers information from each manifestnode included in the
1395 1397 # changegroup about which filenodes the manifest node references
1396 1398 # so we can include those in the changegroup too.
1397 1399 #
1398 1400 # It also remembers which changenode each filenode belongs to. It
1399 1401 # does this by assuming the a filenode belongs to the changenode
1400 1402 # the first manifest that references it belongs to.
1401 1403 def collect_msng_filenodes(mnfstnode):
1402 1404 r = mnfst.rev(mnfstnode)
1403 1405 if r == next_rev[0]:
1404 1406 # If the last rev we looked at was the one just previous,
1405 1407 # we only need to see a diff.
1406 1408 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1407 1409 # For each line in the delta
1408 1410 for dline in delta.splitlines():
1409 1411 # get the filename and filenode for that line
1410 1412 f, fnode = dline.split('\0')
1411 1413 fnode = bin(fnode[:40])
1412 1414 f = changedfiles.get(f, None)
1413 1415 # And if the file is in the list of files we care
1414 1416 # about.
1415 1417 if f is not None:
1416 1418 # Get the changenode this manifest belongs to
1417 1419 clnode = msng_mnfst_set[mnfstnode]
1418 1420 # Create the set of filenodes for the file if
1419 1421 # there isn't one already.
1420 1422 ndset = msng_filenode_set.setdefault(f, {})
1421 1423 # And set the filenode's changelog node to the
1422 1424 # manifest's if it hasn't been set already.
1423 1425 ndset.setdefault(fnode, clnode)
1424 1426 else:
1425 1427 # Otherwise we need a full manifest.
1426 1428 m = mnfst.read(mnfstnode)
1427 1429 # For every file in we care about.
1428 1430 for f in changedfiles:
1429 1431 fnode = m.get(f, None)
1430 1432 # If it's in the manifest
1431 1433 if fnode is not None:
1432 1434 # See comments above.
1433 1435 clnode = msng_mnfst_set[mnfstnode]
1434 1436 ndset = msng_filenode_set.setdefault(f, {})
1435 1437 ndset.setdefault(fnode, clnode)
1436 1438 # Remember the revision we hope to see next.
1437 1439 next_rev[0] = r + 1
1438 1440 return collect_msng_filenodes
1439 1441
1440 1442 # We have a list of filenodes we think we need for a file, lets remove
1441 1443 # all those we now the recipient must have.
1442 1444 def prune_filenodes(f, filerevlog):
1443 1445 msngset = msng_filenode_set[f]
1444 1446 hasset = {}
1445 1447 # If a 'missing' filenode thinks it belongs to a changenode we
1446 1448 # assume the recipient must have, then the recipient must have
1447 1449 # that filenode.
1448 1450 for n in msngset:
1449 1451 clnode = cl.node(filerevlog.linkrev(n))
1450 1452 if clnode in has_cl_set:
1451 1453 hasset[n] = 1
1452 1454 prune_parents(filerevlog, hasset, msngset)
1453 1455
1454 1456 # A function generator function that sets up the a context for the
1455 1457 # inner function.
1456 1458 def lookup_filenode_link_func(fname):
1457 1459 msngset = msng_filenode_set[fname]
1458 1460 # Lookup the changenode the filenode belongs to.
1459 1461 def lookup_filenode_link(fnode):
1460 1462 return msngset[fnode]
1461 1463 return lookup_filenode_link
1462 1464
1463 1465 # Now that we have all theses utility functions to help out and
1464 1466 # logically divide up the task, generate the group.
1465 1467 def gengroup():
1466 1468 # The set of changed files starts empty.
1467 1469 changedfiles = {}
1468 1470 # Create a changenode group generator that will call our functions
1469 1471 # back to lookup the owning changenode and collect information.
1470 1472 group = cl.group(msng_cl_lst, identity,
1471 1473 manifest_and_file_collector(changedfiles))
1472 1474 for chnk in group:
1473 1475 yield chnk
1474 1476
1475 1477 # The list of manifests has been collected by the generator
1476 1478 # calling our functions back.
1477 1479 prune_manifests()
1478 1480 msng_mnfst_lst = msng_mnfst_set.keys()
1479 1481 # Sort the manifestnodes by revision number.
1480 1482 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1481 1483 # Create a generator for the manifestnodes that calls our lookup
1482 1484 # and data collection functions back.
1483 1485 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1484 1486 filenode_collector(changedfiles))
1485 1487 for chnk in group:
1486 1488 yield chnk
1487 1489
1488 1490 # These are no longer needed, dereference and toss the memory for
1489 1491 # them.
1490 1492 msng_mnfst_lst = None
1491 1493 msng_mnfst_set.clear()
1492 1494
1493 1495 changedfiles = changedfiles.keys()
1494 1496 changedfiles.sort()
1495 1497 # Go through all our files in order sorted by name.
1496 1498 for fname in changedfiles:
1497 1499 filerevlog = self.file(fname)
1498 1500 # Toss out the filenodes that the recipient isn't really
1499 1501 # missing.
1500 1502 if msng_filenode_set.has_key(fname):
1501 1503 prune_filenodes(fname, filerevlog)
1502 1504 msng_filenode_lst = msng_filenode_set[fname].keys()
1503 1505 else:
1504 1506 msng_filenode_lst = []
1505 1507 # If any filenodes are left, generate the group for them,
1506 1508 # otherwise don't bother.
1507 1509 if len(msng_filenode_lst) > 0:
1508 1510 yield changegroup.genchunk(fname)
1509 1511 # Sort the filenodes by their revision #
1510 1512 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1511 1513 # Create a group generator and only pass in a changenode
1512 1514 # lookup function as we need to collect no information
1513 1515 # from filenodes.
1514 1516 group = filerevlog.group(msng_filenode_lst,
1515 1517 lookup_filenode_link_func(fname))
1516 1518 for chnk in group:
1517 1519 yield chnk
1518 1520 if msng_filenode_set.has_key(fname):
1519 1521 # Don't need this anymore, toss it to free memory.
1520 1522 del msng_filenode_set[fname]
1521 1523 # Signal that no more groups are left.
1522 1524 yield changegroup.closechunk()
1523 1525
1524 1526 if msng_cl_lst:
1525 1527 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1526 1528
1527 1529 return util.chunkbuffer(gengroup())
1528 1530
1529 1531 def changegroup(self, basenodes, source):
1530 1532 """Generate a changegroup of all nodes that we have that a recipient
1531 1533 doesn't.
1532 1534
1533 1535 This is much easier than the previous function as we can assume that
1534 1536 the recipient has any changenode we aren't sending them."""
1535 1537
1536 1538 self.hook('preoutgoing', throw=True, source=source)
1537 1539
1538 1540 cl = self.changelog
1539 1541 nodes = cl.nodesbetween(basenodes, None)[0]
1540 1542 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1541 1543
1542 1544 def identity(x):
1543 1545 return x
1544 1546
1545 1547 def gennodelst(revlog):
1546 1548 for r in xrange(0, revlog.count()):
1547 1549 n = revlog.node(r)
1548 1550 if revlog.linkrev(n) in revset:
1549 1551 yield n
1550 1552
1551 1553 def changed_file_collector(changedfileset):
1552 1554 def collect_changed_files(clnode):
1553 1555 c = cl.read(clnode)
1554 1556 for fname in c[3]:
1555 1557 changedfileset[fname] = 1
1556 1558 return collect_changed_files
1557 1559
1558 1560 def lookuprevlink_func(revlog):
1559 1561 def lookuprevlink(n):
1560 1562 return cl.node(revlog.linkrev(n))
1561 1563 return lookuprevlink
1562 1564
1563 1565 def gengroup():
1564 1566 # construct a list of all changed files
1565 1567 changedfiles = {}
1566 1568
1567 1569 for chnk in cl.group(nodes, identity,
1568 1570 changed_file_collector(changedfiles)):
1569 1571 yield chnk
1570 1572 changedfiles = changedfiles.keys()
1571 1573 changedfiles.sort()
1572 1574
1573 1575 mnfst = self.manifest
1574 1576 nodeiter = gennodelst(mnfst)
1575 1577 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1576 1578 yield chnk
1577 1579
1578 1580 for fname in changedfiles:
1579 1581 filerevlog = self.file(fname)
1580 1582 nodeiter = gennodelst(filerevlog)
1581 1583 nodeiter = list(nodeiter)
1582 1584 if nodeiter:
1583 1585 yield changegroup.genchunk(fname)
1584 1586 lookup = lookuprevlink_func(filerevlog)
1585 1587 for chnk in filerevlog.group(nodeiter, lookup):
1586 1588 yield chnk
1587 1589
1588 1590 yield changegroup.closechunk()
1589 1591
1590 1592 if nodes:
1591 1593 self.hook('outgoing', node=hex(nodes[0]), source=source)
1592 1594
1593 1595 return util.chunkbuffer(gengroup())
1594 1596
1595 1597 def addchangegroup(self, source, srctype, url):
1596 1598 """add changegroup to repo.
1597 1599 returns number of heads modified or added + 1."""
1598 1600
1599 1601 def csmap(x):
1600 1602 self.ui.debug(_("add changeset %s\n") % short(x))
1601 1603 return cl.count()
1602 1604
1603 1605 def revmap(x):
1604 1606 return cl.rev(x)
1605 1607
1606 1608 if not source:
1607 1609 return 0
1608 1610
1609 1611 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1610 1612
1611 1613 changesets = files = revisions = 0
1612 1614
1613 1615 tr = self.transaction()
1614 1616
1615 1617 # write changelog data to temp files so concurrent readers will not see
1616 1618 # inconsistent view
1617 1619 cl = None
1618 1620 try:
1619 1621 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1620 1622
1621 1623 oldheads = len(cl.heads())
1622 1624
1623 1625 # pull off the changeset group
1624 1626 self.ui.status(_("adding changesets\n"))
1625 1627 cor = cl.count() - 1
1626 1628 chunkiter = changegroup.chunkiter(source)
1627 1629 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1628 1630 raise util.Abort(_("received changelog group is empty"))
1629 1631 cnr = cl.count() - 1
1630 1632 changesets = cnr - cor
1631 1633
1632 1634 # pull off the manifest group
1633 1635 self.ui.status(_("adding manifests\n"))
1634 1636 chunkiter = changegroup.chunkiter(source)
1635 1637 # no need to check for empty manifest group here:
1636 1638 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1637 1639 # no new manifest will be created and the manifest group will
1638 1640 # be empty during the pull
1639 1641 self.manifest.addgroup(chunkiter, revmap, tr)
1640 1642
1641 1643 # process the files
1642 1644 self.ui.status(_("adding file changes\n"))
1643 1645 while 1:
1644 1646 f = changegroup.getchunk(source)
1645 1647 if not f:
1646 1648 break
1647 1649 self.ui.debug(_("adding %s revisions\n") % f)
1648 1650 fl = self.file(f)
1649 1651 o = fl.count()
1650 1652 chunkiter = changegroup.chunkiter(source)
1651 1653 if fl.addgroup(chunkiter, revmap, tr) is None:
1652 1654 raise util.Abort(_("received file revlog group is empty"))
1653 1655 revisions += fl.count() - o
1654 1656 files += 1
1655 1657
1656 1658 cl.writedata()
1657 1659 finally:
1658 1660 if cl:
1659 1661 cl.cleanup()
1660 1662
1661 1663 # make changelog see real files again
1662 1664 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1663 1665 self.changelog.checkinlinesize(tr)
1664 1666
1665 1667 newheads = len(self.changelog.heads())
1666 1668 heads = ""
1667 1669 if oldheads and newheads != oldheads:
1668 1670 heads = _(" (%+d heads)") % (newheads - oldheads)
1669 1671
1670 1672 self.ui.status(_("added %d changesets"
1671 1673 " with %d changes to %d files%s\n")
1672 1674 % (changesets, revisions, files, heads))
1673 1675
1674 1676 if changesets > 0:
1675 1677 self.hook('pretxnchangegroup', throw=True,
1676 1678 node=hex(self.changelog.node(cor+1)), source=srctype,
1677 1679 url=url)
1678 1680
1679 1681 tr.close()
1680 1682
1681 1683 if changesets > 0:
1682 1684 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1683 1685 source=srctype, url=url)
1684 1686
1685 1687 for i in range(cor + 1, cnr + 1):
1686 1688 self.hook("incoming", node=hex(self.changelog.node(i)),
1687 1689 source=srctype, url=url)
1688 1690
1689 1691 return newheads - oldheads + 1
1690 1692
1691 1693
1692 1694 def stream_in(self, remote):
1693 1695 fp = remote.stream_out()
1694 1696 resp = int(fp.readline())
1695 1697 if resp != 0:
1696 1698 raise util.Abort(_('operation forbidden by server'))
1697 1699 self.ui.status(_('streaming all changes\n'))
1698 1700 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1699 1701 self.ui.status(_('%d files to transfer, %s of data\n') %
1700 1702 (total_files, util.bytecount(total_bytes)))
1701 1703 start = time.time()
1702 1704 for i in xrange(total_files):
1703 1705 name, size = fp.readline().split('\0', 1)
1704 1706 size = int(size)
1705 1707 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1706 1708 ofp = self.opener(name, 'w')
1707 1709 for chunk in util.filechunkiter(fp, limit=size):
1708 1710 ofp.write(chunk)
1709 1711 ofp.close()
1710 1712 elapsed = time.time() - start
1711 1713 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1712 1714 (util.bytecount(total_bytes), elapsed,
1713 1715 util.bytecount(total_bytes / elapsed)))
1714 1716 self.reload()
1715 1717 return len(self.heads()) + 1
1716 1718
1717 1719 def clone(self, remote, heads=[], stream=False):
1718 1720 '''clone remote repository.
1719 1721
1720 1722 keyword arguments:
1721 1723 heads: list of revs to clone (forces use of pull)
1722 1724 stream: use streaming clone if possible'''
1723 1725
1724 1726 # now, all clients that can request uncompressed clones can
1725 1727 # read repo formats supported by all servers that can serve
1726 1728 # them.
1727 1729
1728 1730 # if revlog format changes, client will have to check version
1729 1731 # and format flags on "stream" capability, and use
1730 1732 # uncompressed only if compatible.
1731 1733
1732 1734 if stream and not heads and remote.capable('stream'):
1733 1735 return self.stream_in(remote)
1734 1736 return self.pull(remote, heads)
1735 1737
1736 1738 # used to avoid circular references so destructors work
1737 1739 def aftertrans(base):
1738 1740 p = base
1739 1741 def a():
1740 1742 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1741 1743 util.rename(os.path.join(p, "journal.dirstate"),
1742 1744 os.path.join(p, "undo.dirstate"))
1743 1745 return a
1744 1746
1745 1747 def instance(ui, path, create):
1746 1748 return localrepository(ui, util.drop_scheme('file', path), create)
1747 1749
1748 1750 def islocal(path):
1749 1751 return True
@@ -1,208 +1,214 b''
1 1 # sshrepo.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from remoterepo import *
10 10 from i18n import gettext as _
11 11 from demandload import *
12 12 demandload(globals(), "hg os re stat util")
13 13
14 14 class sshrepository(remoterepository):
15 15 def __init__(self, ui, path, create=0):
16 16 self._url = path
17 17 self.ui = ui
18 18
19 19 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
20 20 if not m:
21 21 raise hg.RepoError(_("couldn't parse location %s") % path)
22 22
23 23 self.user = m.group(2)
24 24 self.host = m.group(3)
25 25 self.port = m.group(5)
26 26 self.path = m.group(7) or "."
27 27
28 28 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
29 29 args = self.port and ("%s -p %s") % (args, self.port) or args
30 30
31 31 sshcmd = self.ui.config("ui", "ssh", "ssh")
32 32 remotecmd = self.ui.config("ui", "remotecmd", "hg")
33 33
34 34 if create:
35 35 try:
36 36 self.validate_repo(ui, sshcmd, args, remotecmd)
37 return # the repo is good, nothing more to do
38 37 except hg.RepoError:
39 38 pass
39 else:
40 raise hg.RepoError(_("repository %s already exists") % path)
40 41
41 42 cmd = '%s %s "%s init %s"'
42 43 cmd = cmd % (sshcmd, args, remotecmd, self.path)
43 44
44 45 ui.note('running %s\n' % cmd)
45 46 res = os.system(cmd)
46 47 if res != 0:
47 48 raise hg.RepoError(_("could not create remote repo"))
48 49
49 50 self.validate_repo(ui, sshcmd, args, remotecmd)
50 51
51 52 def url(self):
52 53 return self._url
53 54
54 55 def validate_repo(self, ui, sshcmd, args, remotecmd):
56 # cleanup up previous run
57 self.cleanup()
58
55 59 cmd = '%s %s "%s -R %s serve --stdio"'
56 60 cmd = cmd % (sshcmd, args, remotecmd, self.path)
57 61
58 62 ui.note('running %s\n' % cmd)
59 63 self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')
60 64
61 65 # skip any noise generated by remote shell
62 66 self.do_cmd("hello")
63 67 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
64 68 lines = ["", "dummy"]
65 69 max_noise = 500
66 70 while lines[-1] and max_noise:
67 71 l = r.readline()
68 72 self.readerr()
69 73 if lines[-1] == "1\n" and l == "\n":
70 74 break
71 75 if l:
72 76 ui.debug(_("remote: "), l)
73 77 lines.append(l)
74 78 max_noise -= 1
75 79 else:
76 80 raise hg.RepoError(_("no response from remote hg"))
77 81
78 82 self.capabilities = ()
79 83 lines.reverse()
80 84 for l in lines:
81 85 if l.startswith("capabilities:"):
82 86 self.capabilities = l[:-1].split(":")[1].split()
83 87 break
84 88
85 89 def readerr(self):
86 90 while 1:
87 91 size = util.fstat(self.pipee).st_size
88 92 if size == 0: break
89 93 l = self.pipee.readline()
90 94 if not l: break
91 95 self.ui.status(_("remote: "), l)
92 96
93 def __del__(self):
97 def cleanup(self):
94 98 try:
95 99 self.pipeo.close()
96 100 self.pipei.close()
97 101 # read the error descriptor until EOF
98 102 for l in self.pipee:
99 103 self.ui.status(_("remote: "), l)
100 104 self.pipee.close()
101 105 except:
102 106 pass
103 107
108 __del__ = cleanup
109
104 110 def do_cmd(self, cmd, **args):
105 111 self.ui.debug(_("sending %s command\n") % cmd)
106 112 self.pipeo.write("%s\n" % cmd)
107 113 for k, v in args.items():
108 114 self.pipeo.write("%s %d\n" % (k, len(v)))
109 115 self.pipeo.write(v)
110 116 self.pipeo.flush()
111 117
112 118 return self.pipei
113 119
114 120 def call(self, cmd, **args):
115 121 r = self.do_cmd(cmd, **args)
116 122 l = r.readline()
117 123 self.readerr()
118 124 try:
119 125 l = int(l)
120 126 except:
121 127 raise hg.RepoError(_("unexpected response '%s'") % l)
122 128 return r.read(l)
123 129
124 130 def lock(self):
125 131 self.call("lock")
126 132 return remotelock(self)
127 133
128 134 def unlock(self):
129 135 self.call("unlock")
130 136
131 137 def heads(self):
132 138 d = self.call("heads")
133 139 try:
134 140 return map(bin, d[:-1].split(" "))
135 141 except:
136 142 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
137 143
138 144 def branches(self, nodes):
139 145 n = " ".join(map(hex, nodes))
140 146 d = self.call("branches", nodes=n)
141 147 try:
142 148 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
143 149 return br
144 150 except:
145 151 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
146 152
147 153 def between(self, pairs):
148 154 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
149 155 d = self.call("between", pairs=n)
150 156 try:
151 157 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
152 158 return p
153 159 except:
154 160 raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))
155 161
156 162 def changegroup(self, nodes, kind):
157 163 n = " ".join(map(hex, nodes))
158 164 return self.do_cmd("changegroup", roots=n)
159 165
160 166 def unbundle(self, cg, heads, source):
161 167 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
162 168 if d:
163 169 raise hg.RepoError(_("push refused: %s") % d)
164 170
165 171 while 1:
166 172 d = cg.read(4096)
167 173 if not d: break
168 174 self.pipeo.write(str(len(d)) + '\n')
169 175 self.pipeo.write(d)
170 176 self.readerr()
171 177
172 178 self.pipeo.write('0\n')
173 179 self.pipeo.flush()
174 180
175 181 self.readerr()
176 182 d = self.pipei.readline()
177 183 if d != '\n':
178 184 return 1
179 185
180 186 l = int(self.pipei.readline())
181 187 r = self.pipei.read(l)
182 188 if not r:
183 189 return 1
184 190 return int(r)
185 191
186 192 def addchangegroup(self, cg, source, url):
187 193 d = self.call("addchangegroup")
188 194 if d:
189 195 raise hg.RepoError(_("push refused: %s") % d)
190 196 while 1:
191 197 d = cg.read(4096)
192 198 if not d: break
193 199 self.pipeo.write(d)
194 200 self.readerr()
195 201
196 202 self.pipeo.flush()
197 203
198 204 self.readerr()
199 205 l = int(self.pipei.readline())
200 206 r = self.pipei.read(l)
201 207 if not r:
202 208 return 1
203 209 return int(r)
204 210
205 211 def stream_out(self):
206 212 return self.do_cmd('stream_out')
207 213
208 214 instance = sshrepository
@@ -1,323 +1,323 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import gettext as _
9 9 from demandload import *
10 10 demandload(globals(), "errno getpass os re socket sys tempfile")
11 11 demandload(globals(), "ConfigParser mdiff templater traceback util")
12 12
13 13 class ui(object):
14 14 def __init__(self, verbose=False, debug=False, quiet=False,
15 15 interactive=True, traceback=False, parentui=None):
16 16 self.overlay = {}
17 17 if parentui is None:
18 18 # this is the parent of all ui children
19 19 self.parentui = None
20 20 self.readhooks = []
21 21 self.trusted_users = {}
22 22 self.trusted_groups = {}
23 23 self.cdata = ConfigParser.SafeConfigParser()
24 24 self.readconfig(util.rcpath())
25 25
26 26 self.quiet = self.configbool("ui", "quiet")
27 27 self.verbose = self.configbool("ui", "verbose")
28 28 self.debugflag = self.configbool("ui", "debug")
29 29 self.interactive = self.configbool("ui", "interactive", True)
30 30 self.traceback = traceback
31 31
32 32 self.updateopts(verbose, debug, quiet, interactive)
33 33 self.diffcache = None
34 34 self.header = []
35 35 self.prev_header = []
36 36 self.revlogopts = self.configrevlog()
37 37 else:
38 38 # parentui may point to an ui object which is already a child
39 39 self.parentui = parentui.parentui or parentui
40 40 self.readhooks = parentui.readhooks[:]
41 41 self.trusted_users = parentui.trusted_users.copy()
42 42 self.trusted_groups = parentui.trusted_groups.copy()
43 43 parent_cdata = self.parentui.cdata
44 44 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
45 45 # make interpolation work
46 46 for section in parent_cdata.sections():
47 47 self.cdata.add_section(section)
48 48 for name, value in parent_cdata.items(section, raw=True):
49 49 self.cdata.set(section, name, value)
50 50
51 51 def __getattr__(self, key):
52 52 return getattr(self.parentui, key)
53 53
54 54 def updateopts(self, verbose=False, debug=False, quiet=False,
55 55 interactive=True, traceback=False, config=[]):
56 56 self.quiet = (self.quiet or quiet) and not verbose and not debug
57 self.verbose = (self.verbose or verbose) or debug
57 self.verbose = ((self.verbose or verbose) or debug) and not self.quiet
58 58 self.debugflag = (self.debugflag or debug)
59 59 self.interactive = (self.interactive and interactive)
60 60 self.traceback = self.traceback or traceback
61 61 for cfg in config:
62 62 try:
63 63 name, value = cfg.split('=', 1)
64 64 section, name = name.split('.', 1)
65 65 if not self.cdata.has_section(section):
66 66 self.cdata.add_section(section)
67 67 if not section or not name:
68 68 raise IndexError
69 69 self.cdata.set(section, name, value)
70 70 except (IndexError, ValueError):
71 71 raise util.Abort(_('malformed --config option: %s') % cfg)
72 72
73 73 def readconfig(self, fn, root=None):
74 74 if isinstance(fn, basestring):
75 75 fn = [fn]
76 76 for f in fn:
77 77 try:
78 78 fp = open(f)
79 79 except IOError:
80 80 continue
81 81 if ((self.trusted_users or self.trusted_groups) and
82 82 '*' not in self.trusted_users and
83 83 '*' not in self.trusted_groups):
84 84 st = util.fstat(fp)
85 85 user = util.username(st.st_uid)
86 86 group = util.groupname(st.st_gid)
87 87 if (user not in self.trusted_users and
88 88 group not in self.trusted_groups):
89 89 self.warn(_('not reading file %s from untrusted '
90 90 'user %s, group %s\n') % (f, user, group))
91 91 continue
92 92 try:
93 93 self.cdata.readfp(fp, f)
94 94 except ConfigParser.ParsingError, inst:
95 95 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
96 96 # translate paths relative to root (or home) into absolute paths
97 97 if root is None:
98 98 root = os.path.expanduser('~')
99 99 for name, path in self.configitems("paths"):
100 100 if path and "://" not in path and not os.path.isabs(path):
101 101 self.cdata.set("paths", name, os.path.join(root, path))
102 102 user = util.username()
103 103 if user is not None:
104 104 self.trusted_users[user] = 1
105 105 for user in self.configlist('trusted', 'users'):
106 106 self.trusted_users[user] = 1
107 107 for group in self.configlist('trusted', 'groups'):
108 108 self.trusted_groups[group] = 1
109 109 for hook in self.readhooks:
110 110 hook(self)
111 111
112 112 def addreadhook(self, hook):
113 113 self.readhooks.append(hook)
114 114
115 115 def setconfig(self, section, name, val):
116 116 self.overlay[(section, name)] = val
117 117
118 118 def config(self, section, name, default=None):
119 119 if self.overlay.has_key((section, name)):
120 120 return self.overlay[(section, name)]
121 121 if self.cdata.has_option(section, name):
122 122 try:
123 123 return self.cdata.get(section, name)
124 124 except ConfigParser.InterpolationError, inst:
125 125 raise util.Abort(_("Error in configuration:\n%s") % inst)
126 126 if self.parentui is None:
127 127 return default
128 128 else:
129 129 return self.parentui.config(section, name, default)
130 130
131 131 def configlist(self, section, name, default=None):
132 132 """Return a list of comma/space separated strings"""
133 133 result = self.config(section, name)
134 134 if result is None:
135 135 result = default or []
136 136 if isinstance(result, basestring):
137 137 result = result.replace(",", " ").split()
138 138 return result
139 139
140 140 def configbool(self, section, name, default=False):
141 141 if self.overlay.has_key((section, name)):
142 142 return self.overlay[(section, name)]
143 143 if self.cdata.has_option(section, name):
144 144 try:
145 145 return self.cdata.getboolean(section, name)
146 146 except ConfigParser.InterpolationError, inst:
147 147 raise util.Abort(_("Error in configuration:\n%s") % inst)
148 148 if self.parentui is None:
149 149 return default
150 150 else:
151 151 return self.parentui.configbool(section, name, default)
152 152
153 153 def has_config(self, section):
154 154 '''tell whether section exists in config.'''
155 155 return self.cdata.has_section(section)
156 156
157 157 def configitems(self, section):
158 158 items = {}
159 159 if self.parentui is not None:
160 160 items = dict(self.parentui.configitems(section))
161 161 if self.cdata.has_section(section):
162 162 try:
163 163 items.update(dict(self.cdata.items(section)))
164 164 except ConfigParser.InterpolationError, inst:
165 165 raise util.Abort(_("Error in configuration:\n%s") % inst)
166 166 x = items.items()
167 167 x.sort()
168 168 return x
169 169
170 170 def walkconfig(self, seen=None):
171 171 if seen is None:
172 172 seen = {}
173 173 for (section, name), value in self.overlay.iteritems():
174 174 yield section, name, value
175 175 seen[section, name] = 1
176 176 for section in self.cdata.sections():
177 177 for name, value in self.cdata.items(section):
178 178 if (section, name) in seen: continue
179 179 yield section, name, value.replace('\n', '\\n')
180 180 seen[section, name] = 1
181 181 if self.parentui is not None:
182 182 for parent in self.parentui.walkconfig(seen):
183 183 yield parent
184 184
185 185 def extensions(self):
186 186 result = self.configitems("extensions")
187 187 for i, (key, value) in enumerate(result):
188 188 if value:
189 189 result[i] = (key, os.path.expanduser(value))
190 190 return result
191 191
192 192 def hgignorefiles(self):
193 193 result = []
194 194 for key, value in self.configitems("ui"):
195 195 if key == 'ignore' or key.startswith('ignore.'):
196 196 result.append(os.path.expanduser(value))
197 197 return result
198 198
199 199 def configrevlog(self):
200 200 result = {}
201 201 for key, value in self.configitems("revlog"):
202 202 result[key.lower()] = value
203 203 return result
204 204
205 205 def username(self):
206 206 """Return default username to be used in commits.
207 207
208 208 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
209 209 and stop searching if one of these is set.
210 210 Abort if found username is an empty string to force specifying
211 211 the commit user elsewhere, e.g. with line option or repo hgrc.
212 212 If not found, use ($LOGNAME or $USER or $LNAME or
213 213 $USERNAME) +"@full.hostname".
214 214 """
215 215 user = os.environ.get("HGUSER")
216 216 if user is None:
217 217 user = self.config("ui", "username")
218 218 if user is None:
219 219 user = os.environ.get("EMAIL")
220 220 if user is None:
221 221 try:
222 222 user = '%s@%s' % (util.getuser(), socket.getfqdn())
223 223 except KeyError:
224 224 raise util.Abort(_("Please specify a username."))
225 225 return user
226 226
227 227 def shortuser(self, user):
228 228 """Return a short representation of a user name or email address."""
229 229 if not self.verbose: user = util.shortuser(user)
230 230 return user
231 231
232 232 def expandpath(self, loc, default=None):
233 233 """Return repository location relative to cwd or from [paths]"""
234 234 if "://" in loc or os.path.isdir(loc):
235 235 return loc
236 236
237 237 path = self.config("paths", loc)
238 238 if not path and default is not None:
239 239 path = self.config("paths", default)
240 240 return path or loc
241 241
242 242 def write(self, *args):
243 243 if self.header:
244 244 if self.header != self.prev_header:
245 245 self.prev_header = self.header
246 246 self.write(*self.header)
247 247 self.header = []
248 248 for a in args:
249 249 sys.stdout.write(str(a))
250 250
251 251 def write_header(self, *args):
252 252 for a in args:
253 253 self.header.append(str(a))
254 254
255 255 def write_err(self, *args):
256 256 try:
257 257 if not sys.stdout.closed: sys.stdout.flush()
258 258 for a in args:
259 259 sys.stderr.write(str(a))
260 260 except IOError, inst:
261 261 if inst.errno != errno.EPIPE:
262 262 raise
263 263
264 264 def flush(self):
265 265 try: sys.stdout.flush()
266 266 except: pass
267 267 try: sys.stderr.flush()
268 268 except: pass
269 269
270 270 def readline(self):
271 271 return sys.stdin.readline()[:-1]
272 272 def prompt(self, msg, pat=None, default="y"):
273 273 if not self.interactive: return default
274 274 while 1:
275 275 self.write(msg, " ")
276 276 r = self.readline()
277 277 if not pat or re.match(pat, r):
278 278 return r
279 279 else:
280 280 self.write(_("unrecognized response\n"))
281 281 def getpass(self, prompt=None, default=None):
282 282 if not self.interactive: return default
283 283 return getpass.getpass(prompt or _('password: '))
284 284 def status(self, *msg):
285 285 if not self.quiet: self.write(*msg)
286 286 def warn(self, *msg):
287 287 self.write_err(*msg)
288 288 def note(self, *msg):
289 289 if self.verbose: self.write(*msg)
290 290 def debug(self, *msg):
291 291 if self.debugflag: self.write(*msg)
292 292 def edit(self, text, user):
293 293 (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
294 294 text=True)
295 295 try:
296 296 f = os.fdopen(fd, "w")
297 297 f.write(text)
298 298 f.close()
299 299
300 300 editor = (os.environ.get("HGEDITOR") or
301 301 self.config("ui", "editor") or
302 302 os.environ.get("EDITOR", "vi"))
303 303
304 304 util.system("%s \"%s\"" % (editor, name),
305 305 environ={'HGUSER': user},
306 306 onerr=util.Abort, errprefix=_("edit failed"))
307 307
308 308 f = open(name)
309 309 t = f.read()
310 310 f.close()
311 311 t = re.sub("(?m)^HG:.*\n", "", t)
312 312 finally:
313 313 os.unlink(name)
314 314
315 315 return t
316 316
317 317 def print_exc(self):
318 318 '''print exception traceback if traceback printing enabled.
319 319 only to call in exception handler. returns true if traceback
320 320 printed.'''
321 321 if self.traceback:
322 322 traceback.print_exc()
323 323 return self.traceback
@@ -1,50 +1,50 b''
1 1 default = 'summary'
2 2 header = header-gitweb.tmpl
3 3 footer = footer-gitweb.tmpl
4 4 search = search-gitweb.tmpl
5 5 changelog = changelog-gitweb.tmpl
6 6 summary = summary-gitweb.tmpl
7 7 error = error-gitweb.tmpl
8 8 naventry = '<a href="?cmd=changelog;rev=#rev#;style=gitweb">#label|escape#</a> '
9 9 navshortentry = '<a href="?cmd=shortlog;rev=#rev#;style=gitweb">#label|escape#</a> '
10 10 filedifflink = '<a href="?cmd=filediff;node=#node#;file=#file|urlescape#;style=gitweb">#file|escape#</a> '
11 filenodelink = '<tr class="light"><td><a class="list" href="">#file|escape#</a></td><td></td><td class="link"><a href="?cmd=file;filenode=#filenode#;file=#file|urlescape#;style=gitweb">file</a> | <!-- FIXME: <a href="?fd=#filenode|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?cmd=filelog;filenode=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a></td></tr>'
11 filenodelink = '<tr class="light"><td><a class="list" href="">#file|escape#</a></td><td></td><td class="link"><a href="?cmd=file;filenode=#filenode#;file=#file|urlescape#;style=gitweb">file</a> | <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> | <!-- FIXME: <a href="?fd=#filenode|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?cmd=filelog;filenode=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a></td></tr>'
12 12 fileellipses = '...'
13 13 changelogentry = changelogentry-gitweb.tmpl
14 14 searchentry = changelogentry-gitweb.tmpl
15 15 changeset = changeset-gitweb.tmpl
16 16 manifest = manifest-gitweb.tmpl
17 17 manifestdirentry = '<tr class="parity#parity#"><td style="font-family:monospace">drwxr-xr-x</td><td><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">#basename|escape#/</a></td><td class="link"><a href="?mf=#manifest|short#;path=#path|urlescape#;style=gitweb">manifest</a></td></tr>'
18 18 manifestfileentry = '<tr class="parity#parity#"><td style="font-family:monospace">#permissions|permissions#</td><td class="list"><a class="list" href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">#basename|escape#</a></td><td class="link"><a href="?f=#filenode|short#;file=#file|urlescape#;style=gitweb">file</a> | <a href="?fl=#filenode|short#;file=#file|urlescape#;style=gitweb">revisions</a> | <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a></td></tr>'
19 19 filerevision = filerevision-gitweb.tmpl
20 20 fileannotate = fileannotate-gitweb.tmpl
21 21 filelog = filelog-gitweb.tmpl
22 22 fileline = '<div style="font-family:monospace" class="parity#parity#"><pre><span class="linenr"> #linenumber#</span> #line|escape#</pre></div>'
23 23 annotateline = '<tr style="font-family:monospace" class="parity#parity#"><td class="linenr" style="text-align: right;"><a href="?cs=#node|short#;style=gitweb">#author|obfuscate#@#rev#</a></td><td><pre>#line|escape#</pre></td></tr>'
24 24 difflineplus = '<div style="color:#008800;">#line|escape#</div>'
25 25 difflineminus = '<div style="color:#cc0000;">#line|escape#</div>'
26 26 difflineat = '<div style="color:#990099;">#line|escape#</div>'
27 27 diffline = '<div>#line|escape#</div>'
28 28 changelogparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
29 29 changesetparent = '<tr><td>parent</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
30 30 filerevparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
31 31 filerename = '<tr><td class="metatag">parent:</td><td><a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">#file|escape#@#node|short#</a></td></tr>'
32 32 filelogrename = '| <a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">base</a>'
33 33 fileannotateparent = '<tr><td class="metatag">parent:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
34 34 changelogchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
35 35 changesetchild = '<tr><td>child</td><td style="font-family:monospace"><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb">#node|short#</a></td></tr>'
36 36 filerevchild = '<tr><td class="metatag">child:</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
37 37 fileannotatechild = '<tr><td class="metatag">child:</td><td><a href="?cmd=annotate;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
38 38 tags = tags-gitweb.tmpl
39 39 tagentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#tag|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=changelog;rev=#node|short#;style=gitweb">changelog</a> | <a href="?mf=#tagmanifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
40 40 diffblock = '<pre>#lines#</pre>'
41 41 changelogtag = '<tr><th class="tag">tag:</th><td class="tag">#tag|escape#</td></tr>'
42 42 changesettag = '<tr><td>tag</td><td>#tag|escape#</td></tr>'
43 43 filediffparent = '<tr><th class="parent">parent #rev#:</th><td class="parent"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
44 44 filelogparent = '<tr><td align="right">parent #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
45 45 filediffchild = '<tr><th class="child">child #rev#:</th><td class="child"><a href="?cmd=changeset;node=#node#;style=gitweb">#node|short#</a></td></tr>'
46 46 filelogchild = '<tr><td align="right">child #rev#:&nbsp;</td><td><a href="?cmd=file;file=#file|urlescape#;filenode=#node#;style=gitweb">#node|short#</a></td></tr>'
47 47 shortlog = shortlog-gitweb.tmpl
48 48 shortlogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><i>#author#</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><a href="?cmd=changeset;node=#node|short#;style=gitweb">changeset</a> | <a href="?cmd=manifest;manifest=#manifest|short#;path=/;style=gitweb">manifest</a></td></tr>'
49 filelogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><!-- FIXME: <a href="?fd=#node|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> #rename%filelogrename#</td></tr>'
49 filelogentry = '<tr class="parity#parity#"><td class="age"><i>#date|age# ago</i></td><td><a class="list" href="?cmd=changeset;node=#node|short#;style=gitweb"><b>#desc|strip|firstline|escape#</b></a></td><td class="link"><a href="?f=#node|short#;file=#file|urlescape#;style=gitweb">file</a> | <!-- FIXME: <a href="?fd=#node|short#;file=#file|urlescape#;style=gitweb">diff</a> | --> <a href="?fa=#filenode|short#;file=#file|urlescape#;style=gitweb">annotate</a> #rename%filelogrename#</td></tr>'
50 50 archiveentry = ' | <a href="?ca=#node|short#;type=#type|urlescape#">#type|escape#</a> '
@@ -1,52 +1,61 b''
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ $1 != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
21 21 $2
22 22 EOF
23 23 chmod +x dummyssh
24 24
25 25 echo "# creating 'local'"
26 26 hg init local
27 27 echo this > local/foo
28 28 hg ci --cwd local -A -m "init" -d "1000000 0"
29 29
30 echo "#test failure"
31 hg init local
32
30 33 echo "# init+push to remote2"
31 34 hg init -e ./dummyssh ssh://user@dummy/remote2
32 35 hg incoming -R remote2 local
33 36 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
34 37
35 38 echo "# clone to remote1"
36 39 hg clone -e ./dummyssh local ssh://user@dummy/remote1
37 40
41 echo "# init to existing repo"
42 hg init -e ./dummyssh ssh://user@dummy/remote1
43
44 echo "# clone to existing repo"
45 hg clone -e ./dummyssh local ssh://user@dummy/remote1
46
38 47 echo "# output of dummyssh"
39 48 cat dummylog
40 49
41 50 echo "# comparing repositories"
42 51 hg tip -q -R local
43 52 hg tip -q -R remote1
44 53 hg tip -q -R remote2
45 54
46 55 echo "# check names for repositories (clashes with URL schemes, special chars)"
47 56 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
48 57 echo "# hg init \"$i\""
49 58 hg init "$i"
50 59 test -d "$i" -a -d "$i/.hg" -a -d "$i/.hg/data" && echo "ok" || echo "failed"
51 60 done
52 61
@@ -1,56 +1,64 b''
1 1 # creating 'local'
2 2 adding foo
3 #test failure
4 abort: repository local already exists!
3 5 # init+push to remote2
6 remote: abort: repository remote2 not found!
4 7 changeset: 0:c4e059d443be
5 8 tag: tip
6 9 user: test
7 10 date: Mon Jan 12 13:46:40 1970 +0000
8 11 summary: init
9 12
10 13 pushing to ssh://user@dummy/remote2
11 14 searching for changes
12 15 remote: adding changesets
13 16 remote: adding manifests
14 17 remote: adding file changes
15 18 remote: added 1 changesets with 1 changes to 1 files
16 19 # clone to remote1
20 remote: abort: repository remote1 not found!
17 21 searching for changes
18 remote: abort: repository remote1 not found!
19 22 remote: adding changesets
20 23 remote: adding manifests
21 24 remote: adding file changes
22 25 remote: added 1 changesets with 1 changes to 1 files
26 # init to existing repo
27 abort: repository ssh://user@dummy/remote1 already exists!
28 # clone to existing repo
29 abort: repository ssh://user@dummy/remote1 already exists!
23 30 # output of dummyssh
24 31 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
25 32 Got arguments 1:user@dummy 2:hg init remote2 3: 4: 5:
26 33 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
27 34 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio 3: 4: 5:
28 35 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
36 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
29 37 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
30 Got arguments 1:user@dummy 2:hg init remote1 3: 4: 5:
38 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
31 39 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio 3: 4: 5:
32 40 # comparing repositories
33 41 0:c4e059d443be
34 42 0:c4e059d443be
35 43 0:c4e059d443be
36 44 # check names for repositories (clashes with URL schemes, special chars)
37 45 # hg init "bundle"
38 46 ok
39 47 # hg init "file"
40 48 ok
41 49 # hg init "hg"
42 50 ok
43 51 # hg init "http"
44 52 ok
45 53 # hg init "https"
46 54 ok
47 55 # hg init "old-http"
48 56 ok
49 57 # hg init "ssh"
50 58 ok
51 59 # hg init "static-http"
52 60 ok
53 61 # hg init " "
54 62 ok
55 63 # hg init "with space"
56 64 ok
General Comments 0
You need to be logged in to leave comments. Login now