[140719] trunk/dports/science/htcondor
aronnax at macports.org
Wed Sep 30 21:00:22 PDT 2015
Revision: 140719
https://trac.macports.org/changeset/140719
Author: aronnax at macports.org
Date: 2015-09-30 21:00:22 -0700 (Wed, 30 Sep 2015)
Log Message:
-----------
htcondor: update to 8.4.0
Modified Paths:
--------------
trunk/dports/science/htcondor/Portfile
trunk/dports/science/htcondor/files/condor_config
trunk/dports/science/htcondor/files/patch-build-cmake-CondorPackageConfig.cmake.diff
trunk/dports/science/htcondor/files/patch-src-condor_scripts-macosx_rewrite_libs.diff
trunk/dports/science/htcondor/files/patch-src-condor_utils-condor_config.cpp.diff
trunk/dports/science/htcondor/files/patch-src-python-bindings-CMakeLists.txt.diff
Modified: trunk/dports/science/htcondor/Portfile
===================================================================
--- trunk/dports/science/htcondor/Portfile 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/Portfile 2015-10-01 04:00:22 UTC (rev 140719)
@@ -7,9 +7,8 @@
PortGroup active_variants 1.1
PortGroup conflicts_build 1.0
-github.setup htcondor htcondor 8_2_3 V
+github.setup htcondor htcondor 8_4_0 V
version [strsed ${github.version} g/_/\./]
-revision 1
maintainers aronnax
categories science parallel net
@@ -87,8 +86,8 @@
}
-checksums rmd160 9bd1f88821b891c341aae71e1bfcf7a70af296d0 \
- sha256 d1e426f6dc20e5a8afeda16273d26f1d22891b2762c9369c97e438c4f77af6a9
+checksums rmd160 3f253ceadf68b14adc76b83bf6165e4b93974bcc \
+ sha256 95abe172abd8ebf42333c8267972babba097ff5309f98e201b8e364f2a3add57
depends_build-append port:latex2html
@@ -113,7 +112,7 @@
}
# FIXME: Globus support is disabled because Globus is not available in MacPorts.
-configure.args-append -DWITH_GLOBUS:BOOL="0" -DBUILD_TESTING:BOOL="0"
+configure.args-append -DWITH_GLOBUS:BOOL="0" -DWITH_VOMS:BOOL="0" -DWITH_GSOAP:BOOL="0" -DBUILD_TESTING:BOOL="0"
# Man pages are not part of the default build target.
post-build {
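
The two new checksums can be checked against the fetched distfile before building. A minimal sketch in Python, assuming the tarball has already been downloaded (the path is illustrative, and the rmd160 digest is only available when hashlib is backed by an OpenSSL build that provides it):

    import hashlib

    # Illustrative location; adjust to wherever the 8.4.0 tarball was fetched.
    path = "/opt/local/var/macports/distfiles/htcondor/htcondor-8_4_0.tar.gz"

    def file_digest(algorithm, filename):
        digest = hashlib.new(algorithm)  # "sha256" always works; "ripemd160" needs OpenSSL support
        with open(filename, "rb") as handle:
            for chunk in iter(lambda: handle.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    print("sha256", file_digest("sha256", path))
    print("rmd160", file_digest("ripemd160", path))

The printed values should match the checksums block in the Portfile hunk above.
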
Modified: trunk/dports/science/htcondor/files/condor_config
===================================================================
--- trunk/dports/science/htcondor/files/condor_config 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/files/condor_config 2015-10-01 04:00:22 UTC (rev 140719)
@@ -49,6 +49,9 @@
## What machine is your central manager?
CONDOR_HOST = 127.0.0.1
+## Don't use DNS.
+NO_DNS = True
+
##--------------------------------------------------------------------
## Pathnames:
##--------------------------------------------------------------------
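
With CONDOR_HOST pinned to 127.0.0.1 and NO_DNS enabled, the daemons derive hostnames from IP addresses instead of consulting DNS. A quick sketch with the htcondor Python bindings shows which values the running configuration actually resolves (it assumes the bindings find this condor_config via the normal search path or CONDOR_CONFIG):

    import htcondor

    # Reads the effective configuration the same way the daemons do.
    for knob in ("CONDOR_HOST", "NO_DNS", "NETWORK_INTERFACE"):
        try:
            print(knob, "=", htcondor.param[knob])
        except KeyError:
            print(knob, "is unset")
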
@@ -144,7 +147,7 @@
## FLOCK_FROM defines the machines where you would like to grant
## people access to your pool via flocking. (i.e. you are granting
## access to these machines to join your pool).
-FLOCK_FROM =
+#FLOCK_FROM =
## An example of this is:
#FLOCK_FROM = somehost.friendly.domain, anotherhost.friendly.domain
@@ -152,7 +155,7 @@
## to flock to. (i.e. you are specifying the machines that you
## want your jobs to be negotiated at -- thereby specifying the
## pools they will run in.)
-FLOCK_TO =
+#FLOCK_TO =
## An example of this is:
#FLOCK_TO = central_manager.friendly.domain, condor.cs.wisc.edu
@@ -271,7 +274,7 @@
## default values for any of these settings. If they are not
## defined, no attributes can be set with condor_config_val.
-## Do you want to allow condor_config_val -reset to work at all?
+## Do you want to allow condor_config_val -rset to work at all?
## This feature is disabled by default, so to enable, you must
## uncomment the following setting and change the value to "True".
## Note: changing this requires a restart not just a reconfig.
@@ -334,7 +337,7 @@
## Checkpoint server:
##--------------------------------------------------------------------
## Do you want to use a checkpoint server if one is available? If a
-## checkpoint server isn't available or USE_CKPT_SERVER is set to
+## checkpoint server is not available or USE_CKPT_SERVER is set to
## False, checkpoints will be written to the local SPOOL directory on
## the submission machine.
#USE_CKPT_SERVER = True
@@ -398,25 +401,25 @@
#MAX_CONCURRENT_UPLOADS = 10
## Condor needs to create a few lock files to synchronize access to
-## various log files. Because of problems we've had with network
+## various log files. Because of problems we had with network
## filesystems and file locking over the years, we HIGHLY recommend
## that you put these lock files on a local partition on each
## machine. If you don't have your LOCAL_DIR on a local partition,
## be sure to change this entry. Whatever user (or group) condor is
## running as needs to have write access to this directory. If
## you're not running as root, this is whatever user you started up
-## the condor_master as. If you are running as root, and there's a
-## condor account, it's probably condor. Otherwise, it's whatever
+## the condor_master as. If you are running as root, and there is a
+## condor account, it is probably condor. Otherwise, it is whatever
## you've set in the CONDOR_IDS environment variable. See the Admin
## manual for details on this.
LOCK = $(LOG)
## If you don't use a fully qualified name in your /etc/hosts file
## (or NIS, etc.) for either your official hostname or as an alias,
-## Condor wouldn't normally be able to use fully qualified names in
+## Condor would not normally be able to use fully qualified names in
## places that it'd like to. You can set this parameter to the
## domain you'd like appended to your hostname, if changing your host
-## information isn't a good option. This parameter must be set in
+## information is not a good option. This parameter must be set in
## the global config file (not the LOCAL_CONFIG_FILE from above).
#DEFAULT_DOMAIN_NAME = your.domain.name
@@ -435,7 +438,7 @@
## we don't do anything, and leave in place whatever limit was in
## effect when you started the Condor daemons. If this parameter is
## set and "True", we increase the limit to as large as it gets. If
-## it's set to "False", we set the limit at 0 (which means that no
+## it is set to "False", we set the limit at 0 (which means that no
## core files are even created). Core files greatly help the Condor
## developers debug any problems you might be having.
#CREATE_CORE_FILES = True
@@ -448,27 +451,6 @@
## want.
#ABORT_ON_EXCEPTION = False
-## Condor Glidein downloads binaries from a remote server for the
-## machines into which you're gliding. This saves you from manually
-## downloading and installing binaries for every architecture you
-## might want to glidein to. The default server is one maintained at
-## The University of Wisconsin. If you don't want to use the UW
-## server, you can set up your own and change the following to
-## point to it, instead.
-GLIDEIN_SERVER_URLS = \
- http://www.cs.wisc.edu/condor/glidein/binaries
-
-## List the sites you want to GlideIn to on the GLIDEIN_SITES. For example,
-## if you'd like to GlideIn to some Alliance GiB resources,
-## uncomment the line below.
-## Make sure that $(GLIDEIN_SITES) is included in ALLOW_READ and
-## ALLOW_WRITE, or else your GlideIns won't be able to join your pool.
-## This is _NOT_ done for you by default, because it is an even better
-## idea to use a strong security method (such as GSI) rather than
-## host-based security for authorizing glideins.
-#GLIDEIN_SITES = *.ncsa.uiuc.edu, *.cs.wisc.edu, *.mcs.anl.gov
-#GLIDEIN_SITES =
-
## If your site needs to use UID_DOMAIN settings (defined above) that
## are not real Internet domains that match the hostnames, you can
## tell Condor to trust whatever UID_DOMAIN a submit machine gives to
@@ -521,7 +503,7 @@
## credentials when sending them over the wire between daemons.
## Delegation can take up to a second, which is very slow when
## submitting a large number of jobs. Copying exposes the credential
-## to third parties if Condor isn't set to encrypt communications.
+## to third parties if Condor is not set to encrypt communications.
## By default, Condor will delegate rather than copy.
#DELEGATE_JOB_GSI_CREDENTIALS = True
@@ -555,6 +537,7 @@
## chooses a network interface automatically. It tries to choose a public
## interface if one is available. If it cannot decide which of two interfaces
## to choose from, it will pick the first one.
+BIND_ALL_INTERFACES = False
NETWORK_INTERFACE = 127.0.0.1
##--------------------------------------------------------------------
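
With BIND_ALL_INTERFACES off and NETWORK_INTERFACE pinned to loopback, this personal-pool configuration should not be reachable from other hosts. A small connectivity check, assuming the default collector port 9618 and a running pool:

    import socket

    # 9618 is HTCondor's default collector port; adjust if the pool uses another.
    for host in ("127.0.0.1", socket.gethostname()):
        probe = socket.socket()
        probe.settimeout(2)
        try:
            probe.connect((host, 9618))
            print(host, "accepts connections")
        except (socket.error, socket.timeout) as exc:
            print(host, "not reachable:", exc)
        finally:
            probe.close()
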
@@ -565,53 +548,54 @@
## The flags given in ALL_DEBUG are shared between all daemons.
##
-ALL_DEBUG =
+#ALL_DEBUG =
+#MAX_DEFAULT_LOG = 10 Mb
-MAX_COLLECTOR_LOG = 1000000
-COLLECTOR_DEBUG =
+#MAX_COLLECTOR_LOG = $(MAX_DEFAULT_LOG)
+#COLLECTOR_DEBUG =
-MAX_KBDD_LOG = 1000000
-KBDD_DEBUG =
+#MAX_KBDD_LOG = $(MAX_DEFAULT_LOG)
+#KBDD_DEBUG =
-MAX_NEGOTIATOR_LOG = 1000000
-NEGOTIATOR_DEBUG = D_MATCH
-MAX_NEGOTIATOR_MATCH_LOG = 1000000
+#MAX_NEGOTIATOR_LOG = $(MAX_DEFAULT_LOG)
+#NEGOTIATOR_DEBUG = D_MATCH
+#MAX_NEGOTIATOR_MATCH_LOG = $(MAX_DEFAULT_LOG)
-MAX_SCHEDD_LOG = 1000000
-SCHEDD_DEBUG = D_PID
+#MAX_SCHEDD_LOG = $(MAX_DEFAULT_LOG)
+#SCHEDD_DEBUG = D_PID
-MAX_SHADOW_LOG = 1000000
-SHADOW_DEBUG =
+#MAX_SHADOW_LOG = $(MAX_DEFAULT_LOG)
+#SHADOW_DEBUG =
-MAX_STARTD_LOG = 1000000
-STARTD_DEBUG =
+#MAX_STARTD_LOG = $(MAX_DEFAULT_LOG)
+#STARTD_DEBUG =
-MAX_STARTER_LOG = 1000000
+#MAX_STARTER_LOG = $(MAX_DEFAULT_LOG)
-MAX_MASTER_LOG = 1000000
-MASTER_DEBUG =
-## When the master starts up, should it truncate it's log file?
+#MAX_MASTER_LOG = $(MAX_DEFAULT_LOG)
+#MASTER_DEBUG =
+## Should the master truncate its log file at start up?
#TRUNC_MASTER_LOG_ON_OPEN = False
-MAX_JOB_ROUTER_LOG = 1000000
-JOB_ROUTER_DEBUG =
+#MAX_JOB_ROUTER_LOG = $(MAX_DEFAULT_LOG)
+#JOB_ROUTER_DEBUG =
-MAX_ROOSTER_LOG = 1000000
-ROOSTER_DEBUG =
+#MAX_ROOSTER_LOG = $(MAX_DEFAULT_LOG)
+#ROOSTER_DEBUG =
-MAX_SHARED_PORT_LOG = 1000000
-SHARED_PORT_DEBUG =
+#MAX_SHARED_PORT_LOG = $(MAX_DEFAULT_LOG)
+#SHARED_PORT_DEBUG =
-MAX_HDFS_LOG = 1000000
-HDFS_DEBUG =
+#MAX_HDFS_LOG = $(MAX_DEFAULT_LOG)
+#HDFS_DEBUG =
# High Availability Logs
-MAX_HAD_LOG = 1000000
-HAD_DEBUG =
-MAX_REPLICATION_LOG = 1000000
-REPLICATION_DEBUG =
-MAX_TRANSFERER_LOG = 1000000
-TRANSFERER_DEBUG =
+#MAX_HAD_LOG = $(MAX_DEFAULT_LOG)
+#HAD_DEBUG =
+#MAX_REPLICATION_LOG = $(MAX_DEFAULT_LOG)
+#REPLICATION_DEBUG =
+#MAX_TRANSFERER_LOG = $(MAX_DEFAULT_LOG)
+#TRANSFERER_DEBUG =
## The daemons touch their log file periodically, even when they have
@@ -690,55 +674,50 @@
#####################################################################
## This where you choose the configuration that you would like to
-## use. It has no defaults so it must be defined. We start this
-## file off with the UWCS_* policy.
+## use. If you don't specify, the default policy is a no-preemption
+## policy. This is what older config files called TESTINGMODE_*
######################################################################
-## Also here is what is referred to as the TESTINGMODE_*, which is
-## a quick hardwired way to test Condor with a simple no-preemption policy.
-## Replace UWCS_* with TESTINGMODE_* if you wish to do testing mode.
-## For example:
-## WANT_SUSPEND = $(UWCS_WANT_SUSPEND)
-## becomes
-## WANT_SUSPEND = $(TESTINGMODE_WANT_SUSPEND)
-
# When should we only consider SUSPEND instead of PREEMPT?
-WANT_SUSPEND = $(TESTINGMODE_WANT_SUSPEND)
+#WANT_SUSPEND = False
# When should we preempt gracefully instead of hard-killing?
-WANT_VACATE = $(TESTINGMODE_WANT_VACATE)
+#WANT_VACATE = False
## When is this machine willing to start a job?
-START = $(TESTINGMODE_START)
+#START = True
-## When should a local universe job be allowed to start?
-#START_LOCAL_UNIVERSE = TotalLocalJobsRunning < 200
-
-## When should a scheduler universe job be allowed to start?
-#START_SCHEDULER_UNIVERSE = TotalSchedulerJobsRunning < 200
-
## When to suspend a job?
-SUSPEND = $(TESTINGMODE_SUSPEND)
+#SUSPEND = False
## When to resume a suspended job?
-CONTINUE = $(TESTINGMODE_CONTINUE)
+#CONTINUE = True
## When to nicely stop a job?
## (as opposed to killing it instantaneously)
-PREEMPT = $(TESTINGMODE_PREEMPT)
+#PREEMPT = False
## When to instantaneously kill a preempting job
## (e.g. if a job is in the pre-empting stage for too long)
-KILL = $(TESTINGMODE_KILL)
+#KILL = False
-PERIODIC_CHECKPOINT = $(TESTINGMODE_PERIODIC_CHECKPOINT)
-PREEMPTION_REQUIREMENTS = $(TESTINGMODE_PREEMPTION_REQUIREMENTS)
-PREEMPTION_RANK = $(TESTINGMODE_PREEMPTION_RANK)
-NEGOTIATOR_PRE_JOB_RANK = $(TESTINGMODE_NEGOTIATOR_PRE_JOB_RANK)
-NEGOTIATOR_POST_JOB_RANK = $(TESTINGMODE_NEGOTIATOR_POST_JOB_RANK)
-MaxJobRetirementTime = $(UWCS_MaxJobRetirementTime)
-CLAIM_WORKLIFE = $(TESTINGMODE_CLAIM_WORKLIFE)
+## A non-zero value here is another way to have a no-preemption policy:
+## a job is given this many seconds to exit even when it is preempted.
+#MaxJobRetirementTime = 0
+#PERIODIC_CHECKPOINT = ((time() - LastPeriodicCheckpoint)/60.0) > (180.0 + $RANDOM_INTEGER(-30,30,1))
+#PREEMPTION_REQUIREMENTS = False
+#PREEMPTION_RANK = $(UWCS_PREEMPTION_RANK)
+#NEGOTIATOR_PRE_JOB_RANK = RemoteOwner =?= UNDEFINED
+#NEGOTIATOR_POST_JOB_RANK = (RemoteOwner =?= UNDEFINED) * (ifthenElse(isUndefined(KFlops), 1000, Kflops) - SlotID - 1.0e10*(Offline=?=True))
+#CLAIM_WORKLIFE = 1200
+
+## When should a local universe job be allowed to start?
+#START_LOCAL_UNIVERSE = TotalLocalJobsRunning < 200
+
+## When should a scheduler universe job be allowed to start?
+#START_SCHEDULER_UNIVERSE = TotalSchedulerJobsRunning < 200
+
#####################################################################
## This is the UWisc - CS Department Configuration.
#####################################################################
@@ -866,7 +845,7 @@
## would take place, allowing control to pass on to
## PREEMPTION_RANK.
UWCS_NEGOTIATOR_POST_JOB_RANK = \
- (RemoteOwner =?= UNDEFINED) * (KFlops - SlotID - 1.0e10*(Offline=?=True))
+ (RemoteOwner =?= UNDEFINED) * (ifthenElse(isUndefined(KFlops), 1000, Kflops) - SlotID - 1.0e10*(Offline=?=True))
## The negotiator will not preempt a job running on a given machine
## unless the PREEMPTION_REQUIREMENTS expression evaluates to true
@@ -880,14 +859,15 @@
## The PREEMPTION_RANK expression is used in a case where preemption
## is the only option and all other negotiation ranks are equal. For
## example, if the job has no preference, it is usually preferable to
-## preempt a job with a small ImageSize instead of a job with a large
-## ImageSize. The default is to rank all preemptable matches the
-## same. However, the negotiator will always prefer to match the job
-## with an idle machine over a preemptable machine, if all other
-## negotiation ranks are equal.
-UWCS_PREEMPTION_RANK = (RemoteUserPrio * 1000000) - TARGET.ImageSize
+## preempt a job that has just started instead of a job with a longer
+## runtime, to cause less badput. The default is to rank all
+## preemptable matches the same. However, the negotiator will always
+## prefer to match the job with an idle machine over a preemptable
+## machine, if all other negotiation ranks are equal.
+UWCS_PREEMPTION_RANK = (RemoteUserPrio * 1000000) - ifThenElse(isUndefined(TotalJobRuntime), 0, TotalJobRuntime)
+
#####################################################################
## This is a Configuration that will cause your Condor jobs to
## always run. This is intended for testing only.
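
Both rank expressions now guard against attributes that may be missing: KFlops is absent when benchmarks never ran, and TotalJobRuntime is absent before a job has accumulated any runtime. A short classad sketch (made-up attribute values, not a live slot ad) shows the fallback behaviour of the new preemption rank:

    import classad

    # Evaluate the new UWCS_PREEMPTION_RANK inside a throwaway ad with no
    # TotalJobRuntime defined; the ifThenElse() guard substitutes 0.
    slot = classad.ClassAd("[ RemoteUserPrio = 0.5 ]")
    slot["PreemptionRank"] = classad.ExprTree(
        "(RemoteUserPrio * 1000000) - ifThenElse(isUndefined(TotalJobRuntime), 0, TotalJobRuntime)")
    print(slot.eval("PreemptionRank"))   # 500000.0 -- the runtime term falls back to 0
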
@@ -1064,7 +1044,7 @@
#HIGHPORT = 9700
#LOWPORT = 9600
-## If a daemon doesn't respond for too long, do you want go generate
+## If a daemon does not respond for too long, do you want to generate
## a core file? This basically controls the type of the signal
## sent to the child process, and mostly affects the Condor Master
#NOT_RESPONDING_WANT_CORE = False
@@ -1086,7 +1066,7 @@
#MASTER, STARTD, SCHEDD, KBDD, COLLECTOR, NEGOTIATOR, EVENTD, \
#VIEW_SERVER, CONDOR_VIEW, VIEW_COLLECTOR, HAWKEYE, CREDD, HAD, \
#DBMSD, QUILL, JOB_ROUTER, ROOSTER, LEASEMANAGER, HDFS, SHARED_PORT, \
-#DEFRAG
+#DEFRAG, GANGLIAD
## Where are the binaries for these daemons?
@@ -1104,6 +1084,7 @@
SHARED_PORT = $(LIBEXEC)/condor_shared_port
TRANSFERER = $(LIBEXEC)/condor_transferer
DEFRAG = $(LIBEXEC)/condor_defrag
+GANGLIAD = $(LIBEXEC)/condor_gangliad
## When the master starts up, it can place it's address (IP and port)
## into a file. This way, tools running on the local machine don't
@@ -1143,7 +1124,7 @@
#MASTER_UPDATE_INTERVAL = 300
## How often do you want the master to check the time stamps of the
-## daemons it's running? If any daemons have been modified, the
+## running daemons? If any daemons have been modified, the
## master restarts them.
#MASTER_CHECK_NEW_EXEC_INTERVAL = 300
@@ -1178,13 +1159,21 @@
##--------------------------------------------------------------------
## condor_collector
##--------------------------------------------------------------------
-## Address to which Condor will send a weekly e-mail with output of
-## condor_status.
+## Address to which Condor will send a weekly e-mail with basic
+## non-specific information about your pool. See
+## http://htcondor.org/privacy.html for more information.
+## To disable this behavior, uncomment the below line to
+## explicitly set CONDOR_DEVELOPERS to NONE.
#CONDOR_DEVELOPERS = condor-admin at cs.wisc.edu
+#CONDOR_DEVELOPERS = NONE
-## Global Collector to periodically advertise basic information about
-## your pool.
+## Global Collector to periodically advertise basic
+## non-specific information about your pool. See
+## http://htcondor.org/privacy.html for more information.
+## To disable this behavior, uncomment the below line to
+## explicitly set CONDOR_DEVELOPERS_COLLECTOR to NONE.
#CONDOR_DEVELOPERS_COLLECTOR = condor.cs.wisc.edu
+#CONDOR_DEVELOPERS_COLLECTOR = NONE
## When the collector starts up, it can place it's address (IP and port)
## into a file. This way, tools running on the local machine don't
@@ -1194,7 +1183,14 @@
## COLLECTOR_HOST, a useful technique for personal Condor installs.
COLLECTOR_ADDRESS_FILE = $(LOG)/.collector_address
+##
+## Conventional HTCondor installations start up as root, and can thus
+## set their own file descriptor limit. Upping the collector's doesn't
+## hurt anything and ameliorates a common scalability problem.
+##
+# COLLECTOR_MAX_FILE_DESCRIPTORS = 10240
+
##--------------------------------------------------------------------
## condor_negotiator
##--------------------------------------------------------------------
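
The commented-out COLLECTOR_MAX_FILE_DESCRIPTORS knob only matters if the process is allowed to raise its own limit, which, as the comment notes, a conventional root-started installation can. What the current environment would permit is easy to inspect from the standard library:

    import resource

    # The soft limit is what a process starts with; the hard limit is the
    # ceiling it may raise itself to without extra privileges.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print("open-file soft limit:", soft, "hard limit:", hard)
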
@@ -1244,15 +1240,13 @@
## When a machine unclaimed, when should it run benchmarks?
## LastBenchmark is initialized to 0, so this expression says as soon
-## as we're unclaimed, run the benchmarks. Thereafter, if we're
-## unclaimed and it's been at least 4 hours since we ran the last
+## as we're unclaimed, run the benchmarks. Thereafter, if we are
+## unclaimed and it has been at least 4 hours since we ran the last
## benchmarks, run them again. The startd keeps a weighted average
## of the benchmark results to provide more accurate values.
-## Note, if you don't want any benchmarks run at all, either comment
-## RunBenchmarks out, or set it to "False".
-BenchmarkTimer = (time() - LastBenchmark)
-RunBenchmarks : (LastBenchmark == 0 ) || ($(BenchmarkTimer) >= (4 * $(HOUR)))
-#RunBenchmarks : False
+## Note, if you don't want any benchmarks run at all, set RunBenchmarks to "False".
+#RunBenchmarks = False
+#RunBenchmarks = (LastBenchmark == 0 ) || ((time() - LastBenchmark) >= (4 * $(HOUR)))
## When the startd does benchmarks, which set of benchmarks should we
## run? The default is the same as pre-7.5.6: MIPS and KFLOPS.
@@ -1299,7 +1293,7 @@
## entry into the ClassAd. In particular, ClassAds require double
## quotes (") around your strings. Numeric values can go in
## directly, as can boolean expressions. For example, if you wanted
-## the startd to advertise its list of console devices, when it's
+## the startd to advertise its list of console devices, when it is
## configured to run benchmarks, and how often it sends updates to
## the central manager, you'd have to define the following helper
## macro:
@@ -1360,7 +1354,7 @@
## Please read the section on "condor_startd Configuration File
## Macros" in the Condor Administrators Manual for a further
## discussion of this setting. Its use is not recommended. This
-## must be an integer ("N" isn't a valid setting, that's just used to
+## must be an integer ("N" is not a valid setting, that's just used to
## represent the default).
#NUM_CPUS = N
@@ -1368,7 +1362,7 @@
## line out. You must restart the startd for this setting to take
## effect. If set to 0 or a negative number, it is ignored.
## By default, it is ignored. Otherwise, it must be a positive
-## integer ("N" isn't a valid setting, that's just used to
+## integer ("N" is not a valid setting, that's just used to
## represent the default).
#MAX_NUM_CPUS = N
@@ -1434,23 +1428,21 @@
## report to your pool (if less than the total number of CPUs). This
## setting is only considered if the "type" settings described above
## are not in use. By default, all CPUs are reported. This setting
-## must be an integer ("N" isn't a valid setting, that's just used to
+## must be an integer ("N" is not a valid setting, that's just used to
## represent the default).
#NUM_SLOTS = N
## How many of the slots the startd is representing should
## be "connected" to the console (in other words, notice when there's
-## console activity)? This defaults to all slots (N in a
-## machine with N CPUs). This must be an integer ("N" isn't a valid
-## setting, that's just used to represent the default).
-#SLOTS_CONNECTED_TO_CONSOLE = N
+## console activity)? This defaults to all slots
+#SLOTS_CONNECTED_TO_CONSOLE = $(NUM_CPUS)
## How many of the slots the startd is representing should
## be "connected" to the keyboard (for remote tty activity, as well
-## as console activity). Defaults to 1.
-#SLOTS_CONNECTED_TO_KEYBOARD = 1
+## as console activity). Defaults to all slots
+#SLOTS_CONNECTED_TO_KEYBOARD = $(NUM_CPUS)
-## If there are slots that aren't connected to the
+## If there are slots that are not connected to the
## keyboard or the console (see the above two settings), the
## corresponding idle time reported will be the time since the startd
## was spawned, plus the value of this parameter. It defaults to 20
@@ -1531,7 +1523,7 @@
#JOB_START_DELAY = 2
## How many concurrent sub-processes should the schedd spawn to handle
-## queries? (Unix only)
+## queries? (UNIX only)
#SCHEDD_QUERY_WORKERS = 3
## How often should the schedd send a keep alive message to any
@@ -1580,6 +1572,14 @@
## (Currently, these are periodic_hold, periodic_release, and periodic_remove.)
#PERIODIC_EXPR_INTERVAL = 60
+##
+## Conventional HTCondor installations start up as root, and can thus
+## set their own file descriptor limit. Upping the schedd's limit has
+## some minor downsides (larger buffers passed to select() and the like),
+## but upping it makes a class of difficult-to-debug problems much rarer.
+##
+# SCHEDD_MAX_FILE_DESCRIPTORS = 4096
+
######
## Queue management settings:
######
@@ -1654,10 +1654,10 @@
#STARTER_LOCAL_LOGGING = TRUE
## If the UID_DOMAIN settings match on both the execute and submit
-## machines, but the UID of the user who submitted the job isn't in
+## machines, but the UID of the user who submitted the job is not in
## the passwd file of the execute machine, the starter will normally
## exit with an error. Do you want the starter to just start up the
-## job with the specified UID, even if it's not in the passwd file?
+## job with the specified UID, even if it is not in the passwd file?
#SOFT_UID_DOMAIN = FALSE
## honor the run_as_owner option from the condor submit file.
@@ -1764,16 +1764,7 @@
## Who should condor_preen send email to?
#PREEN_ADMIN = $(CONDOR_ADMIN)
-## What files should condor_preen leave in the spool directory?
-VALID_SPOOL_FILES = job_queue.log, job_queue.log.tmp, history, \
- Accountant.log, Accountantnew.log, \
- local_univ_execute, .quillwritepassword, \
- .pgpass, \
- .schedd_address, .schedd_classad
-## What files should condor_preen remove from the log directory?
-INVALID_LOG_FILES = core
-
##--------------------------------------------------------------------
## Java parameters:
##--------------------------------------------------------------------
@@ -1836,8 +1827,8 @@
## Another option is to use /tmp as the location of the GridManager log.
##
-MAX_GRIDMANAGER_LOG = 1000000
-GRIDMANAGER_DEBUG =
+#MAX_GRIDMANAGER_LOG = $(MAX_DEFAULT_LOG)
+#GRIDMANAGER_DEBUG =
GRIDMANAGER_LOG = $(LOG)/GridmanagerLog.$(USERNAME)
GRIDMANAGER_LOCK = $(LOCK)/GridmanagerLock.$(USERNAME)
@@ -1846,13 +1837,17 @@
## Various other settings that the Condor-G can use.
##--------------------------------------------------------------------
-## The number of seconds between status update requests. You can make
-## this short (5 seconds) if you want Condor to respond quickly to
-## instances as they terminate, or you can make it long (300 seconds = 5
-## minutes) if you know your instances will run for awhile and don't
-## mind delay between when they stop and when Condor responds to them
-## stopping.
-GRIDMANAGER_JOB_PROBE_INTERVAL = 300
+## Adjust how frequently the gridmanager checks the status of grid jobs.
+## GRIDMANAGER_JOB_PROBE_INTERVAL sets the minimum time between checks
+## for each job's status in seconds. For grid-types where each job's
+## status must be checked individually (gt2, gt5, nordugrid, batch),
+## GRIDMANAGER_JOB_PROBE_RATE sets the maximum number of status check
+## requests the gridmanager will send to each remote resource per
+## second. If enough jobs are submitted to a remote resource, the
+## interval between checks for each job will increase so as not to
+## exceed the set rate.
+#GRIDMANAGER_JOB_PROBE_INTERVAL = 60
+#GRIDMANAGER_JOB_PROBE_RATE = 5
## For grid-type gt2 jobs (pre-WS GRAM), limit the number of jobmanager
## processes the gridmanager will let run on the headnode. Letting too
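
The interaction of the two probe knobs is easiest to see with a little arithmetic: once a resource holds more jobs than GRIDMANAGER_JOB_PROBE_RATE can cover within GRIDMANAGER_JOB_PROBE_INTERVAL, the per-job interval stretches. A rough model of the documented behaviour, using the default values shown above (a simplification, not HTCondor's actual scheduling code):

    # interval: minimum seconds between checks per job; rate: checks per second.
    interval_s, probes_per_s = 60, 5
    for jobs in (100, 300, 1000):
        effective = max(interval_s, jobs / float(probes_per_s))
        print("%4d jobs -> about %.0f s between status checks per job" % (jobs, effective))
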
@@ -1919,7 +1914,7 @@
## UNIX permissions 1777 (just like /tmp )
## Another option is to use /tmp as the location of the CGAHP log.
##
-MAX_C_GAHP_LOG = 1000000
+#MAX_C_GAHP_LOG = $(MAX_DEFAULT_LOG)
#C_GAHP_LOG = $(LOG)/GridLogs/CGAHPLog.$(USERNAME)
C_GAHP_LOG = /tmp/CGAHPLog.$(USERNAME)
@@ -1996,11 +1991,11 @@
## CredD daemon debugging log
CREDD_LOG = $(LOG)/CredLog
CREDD_DEBUG = D_FULLDEBUG
-MAX_CREDD_LOG = 4000000
+#MAX_CREDD_LOG = $(MAX_DEFAULT_LOG)
-## The credential owner submits the credential. This list specififies
+## The credential owner submits the credential. This list specifies
## other user who are also permitted to see all credentials. Defaults
-## to root on Unix systems, and Administrator on Windows systems.
+## to root on UNIX systems, and Administrator on Windows systems.
#CRED_SUPER_USERS =
## Credential storage location. This directory must exist
@@ -2020,199 +2015,9 @@
## credentials, at this interval.
#CRED_CHECK_INTERVAL = 60
-##
-##--------------------------------------------------------------------
-## Stork data placement server
-##--------------------------------------------------------------------
-## Where is the Stork binary installed?
-STORK = $(SBIN)/stork_server
-## When Stork starts up, it can place it's address (IP and port)
-## into a file. This way, tools running on the local machine don't
-## need an additional "-n host:port" command line option. This
-## feature can be turned off by commenting out this setting.
-STORK_ADDRESS_FILE = $(LOG)/.stork_address
-
-## Specify a remote Stork server here,
-#STORK_HOST = $(CONDOR_HOST):$(STORK_PORT)
-
-## STORK_LOG_BASE specifies the basename for heritage Stork log files.
-## Stork uses this macro to create the following output log files:
-## $(STORK_LOG_BASE): Stork server job queue classad collection
-## journal file.
-## $(STORK_LOG_BASE).history: Used to track completed jobs.
-## $(STORK_LOG_BASE).user_log: User level log, also used by DAGMan.
-STORK_LOG_BASE = $(LOG)/Stork
-
-## Modern Condor DaemonCore logging feature.
-STORK_LOG = $(LOG)/StorkLog
-STORK_DEBUG = D_FULLDEBUG
-MAX_STORK_LOG = 4000000
-
-## Stork startup arguments
-## Start Stork on a well-known port. Uncomment to to simplify
-## connecting to a remote Stork. Note: that this interface may change
-## in a future release.
-#STORK_PORT = 34048
-STORK_PORT = 9621
-STORK_ARGS = -p $(STORK_PORT) -f -Serverlog $(STORK_LOG_BASE)
-
-## Stork environment. Stork modules may require external programs and
-## shared object libraries. These are located using the PATH and
-## LD_LIBRARY_PATH environments. Further, some modules may require
-## further specific environments. By default, Stork inherits a full
-## environment when invoked from condor_master or the shell. If the
-## default environment is not adequate for all Stork modules, specify
-## a replacement environment here. This environment will be set by
-## condor_master before starting Stork, but does not apply if Stork is
-## started directly from the command line.
-#STORK_ENVIRONMENT = TMP=/tmp;CONDOR_CONFIG=/special/config;PATH=/lib
-
-## Limits the number of concurrent data placements handled by Stork.
-#STORK_MAX_NUM_JOBS = 5
-
-## Limits the number of retries for a failed data placement.
-#STORK_MAX_RETRY = 5
-
-## Limits the run time for a data placement job, after which the
-## placement is considered failed.
-#STORK_MAXDELAY_INMINUTES = 10
-
-## Temporary credential storage directory used by Stork.
-#STORK_TMP_CRED_DIR = /tmp
-
-## Directory containing Stork modules.
-#STORK_MODULE_DIR = $(LIBEXEC)
-
##
##--------------------------------------------------------------------
-## Quill Job Queue Mirroring Server
-##--------------------------------------------------------------------
-## Where is the Quill binary installed and what arguments should be passed?
-QUILL = $(SBIN)/condor_quill
-#QUILL_ARGS =
-
-# Where is the log file for the quill daemon?
-QUILL_LOG = $(LOG)/QuillLog
-
-# The identification and location of the quill daemon for local clients.
-QUILL_ADDRESS_FILE = $(LOG)/.quill_address
-
-# If this is set to true, then the rest of the QUILL arguments must be defined
-# for quill to function. If it is False or left undefined, then quill will not
-# be consulted by either the scheduler or the tools, but in the case of a
-# remote quill query where the local client has quill turned off, but the
-# remote client has quill turned on, things will still function normally.
-#QUILL_ENABLED = TRUE
-
-#
-# If Quill is enabled, by default it will only mirror the current job
-# queue into the database. For historical jobs, and classads from other
-# sources, the SQL Log must be enabled.
-#QUILL_USE_SQL_LOG=FALSE
-
-#
-# The SQL Log can be enabled on a per-daemon basis. For example, to collect
-# historical job information, but store no information about execute machines,
-# uncomment these two lines
-#QUILL_USE_SQL_LOG = FALSE
-#SCHEDD.QUILL_USE_SQL_LOG = TRUE
-
-# This will be the name of a quill daemon using this config file. This name
-# should not conflict with any other quill name--or schedd name.
-#QUILL_NAME = quill at postgresql-server.machine.com
-
-# The Postgreql server requires usernames that can manipulate tables. This will
-# be the username associated with this instance of the quill daemon mirroring
-# a schedd's job queue. Each quill daemon must have a unique username
-# associated with it otherwise multiple quill daemons will corrupt the data
-# held under an identical user name.
-#QUILL_DB_NAME = name_of_db
-
-# The required password for the DB user which quill will use to read
-# information from the database about the queue.
-#QUILL_DB_QUERY_PASSWORD = foobar
-
-# What kind of database server is this?
-# For now, only PGSQL is supported
-#QUILL_DB_TYPE = PGSQL
-
-# The machine and port of the postgres server.
-# Although this says IP Addr, it can be a DNS name.
-# It must match whatever format you used for the .pgpass file, however
-#QUILL_DB_IP_ADDR = machine.domain.com:5432
-
-# The login to use to attach to the database for updating information.
-# There should be an entry in file $SPOOL/.pgpass that gives the password
-# for this login id.
-#QUILL_DB_USER = quillwriter
-
-# Polling period, in seconds, for when quill reads transactions out of the
-# schedd's job queue log file and puts them into the database.
-#QUILL_POLLING_PERIOD = 10
-
-# Allows or disallows a remote query to the quill daemon and database
-# which is reading this log file. Defaults to true.
-#QUILL_IS_REMOTELY_QUERYABLE = TRUE
-
-# Add debugging flags to here if you need to debug quill for some reason.
-#QUILL_DEBUG = D_FULLDEBUG
-
-# Number of seconds the master should wait for the Quill daemon to respond
-# before killing it. This number might need to be increased for very
-# large logfiles.
-# The default is 3600 (one hour), but kicking it up to a few hours won't hurt
-#QUILL_NOT_RESPONDING_TIMEOUT = 3600
-
-# Should Quill hold open a database connection to the DBMSD?
-# Each open connection consumes resources at the server, so large pools
-# (100 or more machines) should set this variable to FALSE. Note the
-# default is TRUE.
-#QUILL_MAINTAIN_DB_CONN = TRUE
-
-##
-##--------------------------------------------------------------------
-## Database Management Daemon settings
-##--------------------------------------------------------------------
-## Where is the DBMSd binary installed and what arguments should be passed?
-DBMSD = $(SBIN)/condor_dbmsd
-DBMSD_ARGS = -f
-
-# Where is the log file for the quill daemon?
-DBMSD_LOG = $(LOG)/DbmsdLog
-
-# Interval between consecutive purging calls (in seconds)
-#DATABASE_PURGE_INTERVAL = 86400
-
-# Interval between consecutive database reindexing operations
-# This is only used when dbtype = PGSQL
-#DATABASE_REINDEX_INTERVAL = 86400
-
-# Number of days before purging resource classad history
-# This includes things like machine ads, daemon ads, submitters
-#QUILL_RESOURCE_HISTORY_DURATION = 7
-
-# Number of days before purging job run information
-# This includes job events, file transfers, matchmaker matches, etc
-# This does NOT include the final job ad. condor_history does not need
-# any of this information to work.
-#QUILL_RUN_HISTORY_DURATION = 7
-
-# Number of days before purging job classad history
-# This is the information needed to run condor_history
-#QUILL_JOB_HISTORY_DURATION = 3650
-
-# DB size threshold for warning the condor administrator. This is checked
-# after every purge. The size is given in gigabytes.
-#QUILL_DBSIZE_LIMIT = 20
-
-# Number of seconds the master should wait for the DBMSD to respond before
-# killing it. This number might need to be increased for very large databases
-# The default is 3600 (one hour).
-#DBMSD_NOT_RESPONDING_TIMEOUT = 3600
-
-##
-##--------------------------------------------------------------------
## VM Universe Parameters
##--------------------------------------------------------------------
## Where is the Condor VM-GAHP installed? (Required)
@@ -2226,7 +2031,7 @@
## However, on Windows machine you must always define VM_GAHP_LOG.
#
VM_GAHP_LOG = $(LOG)/VMGahpLog
-MAX_VM_GAHP_LOG = 1000000
+#MAX_VM_GAHP_LOG = $(MAX_DEFAULT_LOG)
#VM_GAHP_DEBUG = D_FULLDEBUG
## What kind of virtual machine program will be used for
@@ -2293,7 +2098,7 @@
## will occur.
## For VMware, we send SIGSTOP to a process for VM in order to
## stop the VM temporarily and send SIGCONT to resume the VM.
-## For Xen, we pause CPU. Pausing CPU doesn't save the memory of VM
+## For Xen, we pause CPU. Pausing CPU does not save the memory of VM
## into a file. It only stops the execution of a VM temporarily.
#VM_SOFT_SUSPEND = TRUE
@@ -2305,7 +2110,7 @@
## Notice: In VMware VM universe, "nobody" can not create a VMware VM.
## So we need to define "VM_UNIV_NOBODY_USER" with a regular user.
## For VMware, the user defined in "VM_UNIV_NOBODY_USER" must have a
-## home directory. So SOFT_UID_DOMAIN doesn't work for VMware VM universe job.
+## home directory. So SOFT_UID_DOMAIN does not work for VMware VM universe job.
## If neither "VM_UNIV_NOBODY_USER" nor "SLOTx_VMUSER"/"SLOTx_USER" is defined,
## VMware VM universe job will run as "condor" instead of "nobody".
## As a result, the preference of local users for a VMware VM universe job
@@ -2377,38 +2182,7 @@
## a disk image
#XEN_BOOTLOADER = /usr/bin/pygrub
-##
-##--------------------------------------------------------------------
-## condor_lease_manager lease manager daemon
-##--------------------------------------------------------------------
-## Where is the LeaseManager binary installed?
-LeaseManager = $(SBIN)/condor_lease_manager
-# Turn on the lease manager
-#DAEMON_LIST = $(DAEMON_LIST), LeaseManager
-
-# The identification and location of the lease manager for local clients.
-LeaseManger_ADDRESS_FILE = $(LOG)/.lease_manager_address
-
-## LeaseManager startup arguments
-#LeaseManager_ARGS = -local-name generic
-
-## LeaseManager daemon debugging log
-LeaseManager_LOG = $(LOG)/LeaseManagerLog
-LeaseManager_DEBUG = D_FULLDEBUG
-MAX_LeaseManager_LOG = 1000000
-
-# Basic parameters
-LeaseManager.GETADS_INTERVAL = 60
-LeaseManager.UPDATE_INTERVAL = 300
-LeaseManager.PRUNE_INTERVAL = 60
-LeaseManager.DEBUG_ADS = False
-
-LeaseManager.CLASSAD_LOG = $(SPOOL)/LeaseManagerState
-#LeaseManager.QUERY_ADTYPE = Any
-#LeaseManager.QUERY_CONSTRAINTS = MyType == "SomeType"
-#LeaseManager.QUERY_CONSTRAINTS = TargetType == "SomeType"
-
##
##--------------------------------------------------------------------
## KBDD - keyboard activity detection daemon
@@ -2498,16 +2272,16 @@
## The host and port for hadoop's name node. If this machine is the
## name node (see HDFS_SERVICES) then the specified port will be used
## to run name node.
-HDFS_NAMENODE = hdfs://example.com:9000
-HDFS_NAMENODE_WEB = example.com:8000
+#HDFS_NAMENODE = hdfs://example.com:9000
+#HDFS_NAMENODE_WEB = example.com:8000
-HDFS_BACKUPNODE = hdfs://example.com:50100
-HDFS_BACKUPNODE_WEB = example.com:50105
+#HDFS_BACKUPNODE = hdfs://example.com:50100
+#HDFS_BACKUPNODE_WEB = example.com:50105
## You need to pick one machine as name node by setting this parameter
## to HDFS_NAMENODE. The remaining machines in a storage cluster will
## act as data nodes (HDFS_DATANODE).
-HDFS_NODETYPE = HDFS_DATANODE
+#HDFS_NODETYPE = HDFS_DATANODE
## If machine is selected to be NameNode then by a role should defined.
## If it selected to be DataNode then this parameter is ignored.
@@ -2517,7 +2291,7 @@
## creating a backup of the namespace. Currently the NameNode
## supports one Backup node at a time.
## CHECKPOINT: Periodically creates checkpoints of the namespace.
-HDFS_NAMENODE_ROLE = ACTIVE
+#HDFS_NAMENODE_ROLE = ACTIVE
## The two set of directories that are required by HDFS are for name
## node (HDFS_NAMENODE_DIR) and data node (HDFS_DATANODE_DIR). The
@@ -2525,8 +2299,8 @@
## name node service and is used to store critical meta data for
## files. The data node needs its directory to store file blocks and
## their replicas.
-HDFS_NAMENODE_DIR = /tmp/hadoop_name
-HDFS_DATANODE_DIR = /scratch/tmp/hadoop_data
+#HDFS_NAMENODE_DIR = /tmp/hadoop_name
+#HDFS_DATANODE_DIR = /scratch/tmp/hadoop_data
## Unlike name node address settings (HDFS_NAMENODE), that needs to be
## well known across the storage cluster, data node can run on any
Modified: trunk/dports/science/htcondor/files/patch-build-cmake-CondorPackageConfig.cmake.diff
===================================================================
--- trunk/dports/science/htcondor/files/patch-build-cmake-CondorPackageConfig.cmake.diff 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/files/patch-build-cmake-CondorPackageConfig.cmake.diff 2015-10-01 04:00:22 UTC (rev 140719)
@@ -1,6 +1,6 @@
---- build/cmake/CondorPackageConfig.cmake.orig 2013-05-20 11:00:54.000000000 -0700
-+++ build/cmake/CondorPackageConfig.cmake 2013-05-20 11:01:42.000000000 -0700
-@@ -116,7 +116,7 @@
+--- build/cmake/CondorPackageConfig.cmake
++++ build/cmake/CondorPackageConfig.cmake
+@@ -118,7 +118,7 @@ set( C_LIB32 lib)
set( C_LIBEXEC libexec )
set( C_SBIN sbin)
Modified: trunk/dports/science/htcondor/files/patch-src-condor_scripts-macosx_rewrite_libs.diff
===================================================================
--- trunk/dports/science/htcondor/files/patch-src-condor_scripts-macosx_rewrite_libs.diff 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/files/patch-src-condor_scripts-macosx_rewrite_libs.diff 2015-10-01 04:00:22 UTC (rev 140719)
@@ -1,5 +1,5 @@
---- src/condor_scripts/macosx_rewrite_libs.orig 2013-06-07 00:27:47.000000000 -0700
-+++ src/condor_scripts/macosx_rewrite_libs 2013-06-07 00:27:52.000000000 -0700
+--- src/condor_scripts/macosx_rewrite_libs
++++ src/condor_scripts/macosx_rewrite_libs
@@ -1,43 +1 @@
#!/bin/sh
-
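
The patch now reduces macosx_rewrite_libs to a bare shebang, so the HTCondor build no longer rewrites library install names itself and the paths produced by the MacPorts build are left alone. Inspecting what a built binary actually links against is straightforward with otool; a sketch, with an illustrative binary path:

    import subprocess

    # Point this at whichever installed HTCondor binary or library you want to check.
    binary = "/opt/local/sbin/condor_master"
    print(subprocess.check_output(["otool", "-L", binary]).decode())
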
Modified: trunk/dports/science/htcondor/files/patch-src-condor_utils-condor_config.cpp.diff
===================================================================
--- trunk/dports/science/htcondor/files/patch-src-condor_utils-condor_config.cpp.diff 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/files/patch-src-condor_utils-condor_config.cpp.diff 2015-10-01 04:00:22 UTC (rev 140719)
@@ -1,5 +1,5 @@
---- src/condor_utils/condor_config.cpp.orig 2013-05-16 14:43:02.000000000 -0700
-+++ src/condor_utils/condor_config.cpp 2013-05-16 14:45:12.000000000 -0700
+--- src/condor_utils/condor_config.cpp
++++ src/condor_utils/condor_config.cpp
@@ -35,7 +35,7 @@
doesn't exist, we look in the following locations:
@@ -9,28 +9,28 @@
3) /usr/local/etc/
4) ~condor/
-@@ -576,7 +576,7 @@
+@@ -916,7 +916,7 @@ real_config(const char* host, int wantsQuiet, int config_options)
fprintf(stderr,"\nNeither the environment variable %s_CONFIG,\n",
myDistro->GetUc() );
# if defined UNIX
-- fprintf(stderr,"/etc/%s/, nor ~%s/ contain a %s_config source.\n",
-+ fprintf(stderr,"@prefix@/etc/%s/, nor ~%s/ contain a %s_config source.\n",
+- fprintf(stderr,"/etc/%s/, /usr/local/etc/, nor ~%s/ contain a %s_config source.\n",
++ fprintf(stderr,"@prefix@/etc/%s/, /usr/local/etc/, nor ~%s/ contain a %s_config source.\n",
myDistro->Get(), myDistro->Get(), myDistro->Get() );
# elif defined WIN32
fprintf(stderr,"nor the registry contains a %s_config source.\n", myDistro->Get() );
-@@ -586,7 +586,7 @@
+@@ -926,7 +926,7 @@ real_config(const char* host, int wantsQuiet, int config_options)
fprintf( stderr,"Either set %s_CONFIG to point to a valid config "
"source,\n", myDistro->GetUc() );
# if defined UNIX
-- fprintf( stderr,"or put a \"%s_config\" file in /etc/%s or ~%s/\n",
-+ fprintf( stderr,"or put a \"%s_config\" file in @prefix@/etc/%s or ~%s/\n",
+- fprintf( stderr,"or put a \"%s_config\" file in /etc/%s/ /usr/local/etc/ or ~%s/\n",
++ fprintf( stderr,"or put a \"%s_config\" file in @prefix@/etc/%s/ /usr/local/etc/ or ~%s/\n",
myDistro->Get(), myDistro->Get(), myDistro->Get() );
# elif defined WIN32
fprintf( stderr,"or put a \"%s_config\" source in the registry at:\n"
-@@ -1060,8 +1060,8 @@
- formatstr( locations[0], "%s/.%s/%s", pw->pw_dir, myDistro->Get(),
- file_name );
- }
+@@ -1477,8 +1477,8 @@ find_file(const char *env_name, const char *file_name, int config_options)
+ // 1) $HOME/.condor/condor_config
+ // $HOME/.condor/condor_config was added for BOSCO and never used, We are removing it in 8.3.1, but may put it back if users complain.
+ //find_user_file(locations[0], file_name, false);
- // 2) /etc/condor/condor_config
- locations[1].formatstr( "/etc/%s/%s", myDistro->Get(), file_name );
+ // 2) @prefix@/etc/condor/condor_config
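
The condor_config.cpp patch replaces the hard-coded /etc locations in the config search path and its error messages with @prefix@/etc, so a MacPorts install finds the configuration under its own prefix (the placeholder is presumably substituted at build time). Selecting the file explicitly through CONDOR_CONFIG behaves the same way; a sketch, assuming the default /opt/local prefix:

    import os
    import htcondor

    # Explicitly point the bindings at the MacPorts configuration and re-read it.
    os.environ["CONDOR_CONFIG"] = "/opt/local/etc/condor/condor_config"
    htcondor.reload_config()
    print(htcondor.param["FULL_HOSTNAME"])
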
Modified: trunk/dports/science/htcondor/files/patch-src-python-bindings-CMakeLists.txt.diff
===================================================================
--- trunk/dports/science/htcondor/files/patch-src-python-bindings-CMakeLists.txt.diff 2015-10-01 02:29:01 UTC (rev 140718)
+++ trunk/dports/science/htcondor/files/patch-src-python-bindings-CMakeLists.txt.diff 2015-10-01 04:00:22 UTC (rev 140719)
@@ -1,14 +1,14 @@
---- src/python-bindings/CMakeLists.txt.orig 2014-07-17 11:44:13.000000000 -0700
-+++ src/python-bindings/CMakeLists.txt 2014-07-17 11:44:25.000000000 -0700
-@@ -11,9 +11,9 @@
- set ( CMAKE_LIBRARY_PATH CMAKE_LIBRARY_PATH_ORIG)
+--- src/python-bindings/CMakeLists.txt
++++ src/python-bindings/CMakeLists.txt
+@@ -101,9 +101,9 @@ else()
+ set ( CMAKE_LIBRARY_PATH CMAKE_LIBRARY_PATH_ORIG)
- if (PROPER AND "${PYTHON_VERSION_MAJOR}" MATCHES "3")
-- set ( PYTHON_BOOST_LIB boost_python3 )
-+ set ( PYTHON_BOOST_LIB boost_python3-mt )
- else()
-- set ( PYTHON_BOOST_LIB boost_python )
-+ set ( PYTHON_BOOST_LIB boost_python-mt )
- endif()
+ if (PROPER AND "${PYTHON_VERSION_MAJOR}" MATCHES "3")
+- set ( PYTHON_BOOST_LIB boost_python3 )
++ set ( PYTHON_BOOST_LIB boost_python3-mt )
+ else()
+- set ( PYTHON_BOOST_LIB boost_python )
++ set ( PYTHON_BOOST_LIB boost_python-mt )
+ endif()
- include_directories(${PYTHON_INCLUDE_DIRS} ${BOOST_INCLUDE})
+ include_directories(${PYTHON_INCLUDE_DIRS} ${BOOST_INCLUDE})
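
The -mt suffix matches the thread-safe library names installed by MacPorts' boost port, so the bindings link against boost_python-mt (or boost_python3-mt) rather than the unsuffixed upstream names. A short smoke test for the resulting modules, assuming a local pool is running so the collector query returns something:

    import htcondor

    print(htcondor.version())
    # Ask the collector named by COLLECTOR_HOST for its own daemon ad.
    for ad in htcondor.Collector().query(htcondor.AdTypes.Collector):
        print(ad.get("Name"), ad.get("CondorVersion"))
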