// Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. // // NAME // CLSCHMsg.msg // // DESCRIPTION // Message file for CHA (cha.jar and chaconfig.jar) // // NOTES // 1000 - 2000 - Alert messages // 2001 - 5000 - cha messages // 2001 - 2400 server messages // 2401 - 2800 target messages // 2801 - 3200 inference messages // 3201 - 3600 util messages // 3601 - 4000 chactl messages // 5001 - 7000 - chaconfig messages // 5001 - 5400 config messages // 5401 - 5800 nativesystem messages // // // // MODIFIED (MM/DD/YY) // wmiyoshi 11/01/16 - Backport wmiyoshi_bug-23705299 from main // wmiyoshi 06/29/16 - Add retention related messages 3743, 3744 // wmiyoshi 06/28/16 - Add alert message 1005 OCHAD_MIN_RETENTION // sahkumar 06/29/16 - bug-23219387 // wmiyoshi 07/07/16 - Add message 3745 // sahkumar 06/02/16 - bug-23521059 // wmiyoshi 04/21/16 - Add calibration errors // wmiyoshi 04/21/16 - Add alert msg for monitoring failure // sahkumar 04/08/16 - bug-22685231 // sahkumar 04/12/16 - bug-23086443 // wmiyoshi 03/25/16 - Add message for unsupported db version // wmiyoshi 03/16/16 - Add error for OS mismatch // sahkumar 02/16/16 - bug 22545489 // sahkumar 01/14/16 - bug-22550288 // sahkumar 11/02/15 - fix bug 22032050 // sahkumar 09/17/15 - Add query calibrate messages // wmiyoshi 08/07/15 - Fix Action for 2408 MODEL_IN_USE // sahkumar 07/07/15 - Add chactl dump messages // wmiyoshi 06/10/15 - Add message 2412 // wmiyoshi 05/07/15 - Add message 3708 // wmiyoshi 01/09/15 - Add messages for 'chactl query diagnosis' // sahkumar 01/09/15 - Add query model messages // sahkumar 12/30/14 - chactl config changes // wmiyoshi 09/30/14 - Modify message 2407 // wmiyoshi 03/10/14 - Moved messages from PrkfMsg (3662 - 3690) // wmiyoshi 02/04/14 - Added calibration message (3654 - 3658) // wmiyoshi 01/31/14 - Moved messages from PrkfMsg (3652 - 3653) // wmiyoshi 01/24/14 - Moved messages from PrClMsg (3603 - 3651) // wmiyoshi 01/24/14 - Moved messages from PrkoMsg 
(3601 - 3602) // wmiyoshi 10/30/13 - Add messages 1003 // wmiyoshi 10/17/13 - Add messages 2001 to 2005, 2405 to 2410 // samjo 08/29/13 - Creation // */ // // PACKAGE=package oracle.cha.resources; // IMPORT=import oracle.ops.mgmt.nls.MessageKey; // Alert messages 1001 - 2000 1001, OCHAD_STARTED, "Oracle Cluster Health Analysis Service (OCHAD) started on host {0}." // *Cause: The Oracle Cluster Health Analysis Service (OCHAD) was successfully // started. // *Action: None. / 1002, OCHAD_FAILED, "Oracle Cluster Health Analysis Service (OCHAD) aborted on host {0}. Details in {1}." // *Cause: The Oracle Cluster Health Analysis Service (OCHAD) aborted due to // an internal error. Check the OCHAD log file to determine the cause. // *Action: Determine whether the Oracle Cluster Health Analysis Service // (OCHAD) is online on the node listed in the message using the // 'srvctl status cha -node <node_name>' command. If OCHAD is offline, // restart OCHAD using the 'srvctl stop cha -node <node_name> -f' // command followed by the 'srvctl start cha -node <node_name>' // command. If the problem persists, contact Oracle Support Services. / 1003, OCHAD_CALIBRATE_FAILED, "Oracle Cluster Health Analysis Service (OCHAD) failed to calibrate on host {0}. Details in {1}." // *Cause: The Oracle Cluster Health Analysis Service (OCHAD) failed to // calibrate due to an internal error. // *Action: Check the OCHAD calibration log file to determine the cause. // If the problem persists, contact Oracle Support Services. / 1004, OCHAD_MONITORING_FAILED, "Oracle Cluster Health Analysis Service (OCHAD) aborted monitoring of {0} {1} on host {2}. Details in {3}." // *Cause: The Oracle Cluster Health Analysis Service (OCHAD) aborted // monitoring of the target due to an internal error. // *Action: Contact Oracle Support Services. / 1005, OCHAD_MIN_RETENTION, "The available retention time is below the minimum {0} hours. Oracle Cluster Health Analysis Service (OCHAD) is stopping monitoring of all databases."
// *Cause: The Oracle Cluster Health Analysis Service (OCHAD) stopped // monitoring all databases because the available retention time // was below the allowed minimum retention time. // *Action: Restart monitoring fewer databases or increase the repository size // by using the command 'chactl resize repository' and restart // monitoring the databases. To restart monitoring a database, use the // command 'chactl monitor database'. / // Server messages 2001 - 2400 2001, CALIBRATE_FAILED, "Oracle Cluster Health Analysis Service (OCHAD) failed to calibrate." // *Cause: This is an internal error. // *Action: Contact Oracle Support Services. / 2002, CALIBRATE_NO_DATA, "There is no calibration data. Calibration failed." // *Cause: There was no calibration data available in the specified time range. // *Action: Reissue the 'oclumon calibrate' command with a valid time range. // Refer to the product documentation or to the online help for // information about the command. / 2003, CALIBRATE_NOT_ENOUGH_DATA, "There is not enough calibration data. Calibration failed." // *Cause: There was not enough calibration data available in the specified // time range. // *Action: Reissue the 'oclumon calibrate' command with a valid time range. // Refer to the product documentation or to the online help for // information about the command. / 2004, LOCAL_INST_ERROR, "Oracle Cluster Health Analysis Service (OCHAD) failed to retrieve calibration data from the local instance \"{0}\"." // *Cause: An error occurred while connecting to or while retrieving data from // the local database instance. // *Action: Take the following steps: // 1) Examine the accompanying error messages. If possible, correct // the issue reported. // 2) Check the status of the local instance of the target database // using the 'srvctl status database' command. If the target // database is offline, issue the 'srvctl start database' command. 
// Reissue the 'oclumon calibrate' command from a node where an // instance of the target database is running. // 3) If you are unable to determine or correct the problem, contact // Oracle Support Services. / 2005, MGMTDB_ERROR, "Oracle Cluster Health Analysis Service (OCHAD) failed due to a Grid Infrastructure Management Repository error." // *Cause: A Grid Infrastructure Management Repository error occurred. // *Action: Take the following steps: // 1) Examine the accompanying error messages. If possible, correct // the issue reported. // 2) Issue a 'srvctl status mgmtdb' command to determine the status // of the Grid Infrastructure Management Repository. If the Grid // Infrastructure Management Repository is offline, issue the // 'srvctl start mgmtdb' command to start it. If it fails to start, // examine the database instance alert log to determine what // happened. // 3) If you are unable to determine or correct the problem, contact // Oracle Support Services. / // Target messages 2401 - 2800 2401, TARGET_TYPE_INVALID, "An invalid target type \"{0}\" was specified to the Oracle Cluster Health Analysis Service (OCHAD)." // *Cause: A request was made to the Oracle Cluster Health Analysis Service // (OCHAD) with an invalid target type. This is an internal error. // *Action: Contact Oracle Support Services. / 2402, TARGET_NAME_IGNORED, "The target name \"{0}\" specified to the Oracle Cluster Health Analysis Service (OCHAD) will be ignored on this node." // *Cause: A request was made to the Oracle Cluster Health Analysis Service // (OCHAD) with a target that was not available on this node. OCHAD // took no action. // *Action: No action required. / 2403, TARGET_ALREADY_MONITORED, "The Oracle Cluster Health Analysis Service (OCHAD) is already monitoring target \"{0}\"." // *Cause: A request was made to the Oracle Cluster Health Analysis Service // (OCHAD) to monitor a target that was already being monitored. OCHAD // took no action. // *Action: No action required.
/ 2404, TARGET_ALREADY_STOPPED, "The Oracle Cluster Health Analysis Service (OCHAD) is not monitoring target \"{0}\"." // *Cause: A request was made to the Oracle Cluster Health Analysis Service // (OCHAD) to stop monitoring a target that was not being monitored. // OCHAD took no action. // *Action: No action required. / 2405, START_MONITORING, "The Oracle Cluster Health Analysis Service (OCHAD) failed to start monitoring target \"{0}\"." // *Cause: This is an internal error. // *Action: Contact Oracle Support Services. / 2406, STOP_MONITORING, "The Oracle Cluster Health Analysis Service (OCHAD) failed to stop monitoring target \"{0}\"." // *Cause: This is an internal error. // *Action: Contact Oracle Support Services. / 2407, MODEL_NOT_FOUND, "The model '{0}' does not exist." // *Cause: The specified model did not exist. // *Action: Issue the 'chactl config' command to list the available models. // Retry the chactl command using one of the available models. / 2408, MODEL_IN_USE, "The model '{0}' is configured to be used for monitoring." // *Cause: An attempt to remove or rename a model specified one that is // configured to be used for monitoring. // *Action: Issue the 'chactl query model -verbose' command to determine by // which target the model is currently being used. Do not remove or // rename the model while the model is configured to be used by any // target. / 2409, POLICY_MANAGED_NODE_MODEL, "Cannot specify both '-node' and '-model' when server pools are configured." // *Cause: A single model is used by all nodes in a server pool for a given // target. The request was rejected because it tried to change the // model for a target for a subset of nodes in the server pool. // *Action: Issue the 'srvctl monitor' command again without specifying both // '-node' and '-model' option for a given target.
/ 2410, ADMIN_MANAGED, "server pool name specified in a cluster without user-defined server pools" // *Cause: An attempt was made to start or stop monitoring a target using the // '-serverpool' option in a cluster without user-defined server pools. // *Action: Reissue the command without a server pool name. Refer to the // product documentation or to the online help for information about // the command. / 2411, STATUS, "The Oracle Cluster Health Analysis Service (OCHAD) failed to determine status of target \"{0}\"." // *Cause: This is an internal error. // *Action: Contact Oracle Support Services. / 2412, WILL_MONITOR, "Monitoring is enabled and will start when the database starts up." // *Cause: Status message. // *Action: None. / 2413, MODEL_VERSION, "The model version {0} is incompatible with the CHA software version {1}." // *Cause: An attempt to use the indicated model version was rejected because // it was incompatible with the indicated Cluster Health Advisor (CHA) // version. // *Action: Recalibrate the model using the command 'chactl calibrate'. / // chactl messages 3601 - 4000 3601, MISSING_MANDATORY_OPTION, "missing mandatory option: {0}" // *Cause: The specified mandatory command option was missing. // *Action: Use chactl with the -help option to display option details for the command and ensure that all of the mandatory options are specified. / 3602, INVALID_OPTION, "invalid command line option: {0}" // *Cause: An invalid command line option was specified. // *Action: Review the command inline help or documentation, and specify appropriate options. / 3603, CHA_MONITOR_DB_FAILED, "failed to start monitoring database {0}" // *Cause: An attempt to start monitoring the specified database failed. // *Action: Examine the accompanying error messages for details. 
/ 3604, CHA_MONITOR_DB_FAILED_ON_NODE, "failed to start monitoring database {0} on node {1}" // *Cause: An attempt to start monitoring an instance of the specified database on the specified node failed. // *Action: Examine the accompanying error messages for details. / 3605, CHA_MONITOR_HOST_FAILED, "failed to start monitoring host {0}" // *Cause: An attempt to start monitoring the specified host failed. // *Action: Examine the accompanying error messages for details. / 3606, CHA_UNMONITOR_DB_FAILED, "failed to stop monitoring database {0}" // *Cause: An attempt to stop monitoring the specified database failed. // *Action: Examine the accompanying error messages for details. / 3607, CHA_UNMONITOR_DB_FAILED_ON_NODE, "failed to stop monitoring database {0} on node {1}" // *Cause: An attempt to stop monitoring an instance of the specified database on the specified node failed. // *Action: Examine the accompanying error messages for details. / 3608, CHA_UNMONITOR_HOST_FAILED, "failed to stop monitoring host {0}" // *Cause: An attempt to stop monitoring the specified host failed. // *Action: Examine the accompanying error messages for details. / 3609, CHA_MONITOR_INTERNAL_ERROR, "failed to start or stop monitoring due to an internal error" // *Cause: An internal error occurred during an attempt to start or stop monitoring. // *Action: Contact Oracle Support Services. / 3610, CHACTL_INTERNAL_ERROR, "An unexpected error occurred in Cluster Health Advisor control utility." // *Cause: An unexpected error occurred in the Cluster Health Advisor control utility. // *Action: Contact Oracle Support Services. / 3611, CHA_MONITOR_DB_FAILED_DBNAME, "Database name to start monitoring is missing." // *Cause: An attempt to start monitoring a database failed because the name of the database to start monitoring was not specified. // *Action: Examine the accompanying error messages for details. / 3612, CHA_UNMONITOR_DB_FAILED_DBNAME, "Database name to stop monitoring is missing."
// *Cause: An attempt to stop monitoring a database failed because the name of the database to stop monitoring was not specified. // *Action: Examine the accompanying error messages for details. / 3613, CHA_GET_STATUS_FAILED, "failed to retrieve the status of Oracle Cluster Health Analysis Service" // *Cause: An attempt to get the status of Oracle Cluster Health Analysis Service failed. // *Action: Examine the accompanying error messages for details. / 3614, CHA_GET_STATUS_FAILED_ON_NODE, "failed to retrieve the status of Oracle Cluster Health Analysis Service for nodes {0}" // *Cause: An attempt to retrieve the status of Oracle Cluster Health Analysis Service for the specified nodes failed. // *Action: Examine the accompanying error messages for details, rectify the problems reported and retry. Otherwise, contact Oracle Support Services. / 3616, CHA_MONITOR_HOST_FAILED_ON_CLUSTER, "failed to start monitoring hosts" // *Cause: An attempt to start monitoring hosts failed. // *Action: Examine the accompanying error messages for details. / 3617, CHA_MONITOR_HOST_FAILED_ON_SRVPOOL, "failed to start monitoring hosts in server pool {0}" // *Cause: An attempt to start monitoring hosts in the specified server pool failed. // *Action: Examine the accompanying error messages for details. / 3618, CHA_MONITOR_DB_FAILED_ON_SRVPOOL, "failed to start monitoring database {0} on server pool {1}" // *Cause: An attempt to start monitoring the specified database on the specified server pool failed. // *Action: Examine the accompanying error messages for details. / 3619, CHA_UNMONITOR_HOST_FAILED_ON_CLUSTER, "failed to stop monitoring hosts" // *Cause: An attempt to stop monitoring hosts failed. // *Action: Examine the accompanying error messages for details. / 3620, CHA_UNMONITOR_HOST_FAILED_ON_SRVPOOL, "failed to stop monitoring hosts in server pool {0}" // *Cause: An attempt to stop monitoring hosts in the specified server pool failed. 
// *Action: Examine the accompanying error messages for details. / 3621, CHA_UNMONITOR_DB_FAILED_ON_SRVPOOL, "failed to stop monitoring database {0} on server pool {1}" // *Cause: An attempt to stop monitoring the specified database on the specified server pool failed. // *Action: Examine the accompanying error messages for details. / 3622, CHA_GET_STATUS_FAILED_ON_SRVPOOL, "failed to retrieve the status of Oracle Cluster Health Analysis Service for server pool {0}" // *Cause: An attempt to retrieve the status of Oracle Cluster Health Analysis Service for the specified server pool failed. // *Action: Examine the accompanying error messages for details. / 3623, CHA_ACTION_REQUEST_FAILED, "failed to request action on Oracle Cluster Health Analysis Service" // *Cause: An attempt to request action on Oracle Cluster Health Analysis Service failed. // *Action: Examine the accompanying error messages for details. / 3624, CHA_MONITOR_DB_FAILED_MODEL, "failed to start monitoring database {0} using model {1}" // *Cause: An attempt to start monitoring the specified database using the specified model failed. // *Action: Examine the accompanying error messages for details. / 3625, CHA_MONITOR_DB_FAILED_ON_NODE_MODEL, "failed to start monitoring database {0} on node {1} using model {2}" // *Cause: An attempt to start monitoring an instance of the specified database on the specified node using the specified model failed. // *Action: Examine the accompanying error messages for details. / 3626, CHA_MONITOR_HOST_FAILED_MODEL, "failed to start monitoring host {0} using model {1}" // *Cause: An attempt to start monitoring the specified host using the specified model failed. // *Action: Examine the accompanying error messages for details. / 3627, CHA_MONITOR_HOST_FAILED_ON_CLUSTER_MODEL, "failed to start monitoring hosts using model {0}" // *Cause: An attempt to start monitoring hosts using the specified model failed. // *Action: Examine the accompanying error messages for details. 
/ 3628, CHA_MONITOR_HOST_FAILED_ON_SRVPOOL_MODEL, "failed to start monitoring hosts in server pool {0} using model {1}" // *Cause: An attempt to start monitoring hosts in the specified server pool using the specified model failed. // *Action: Examine the accompanying error messages for details. / 3629, CHA_MONITOR_DB_FAILED_ON_SRVPOOL_MODEL, "failed to start monitoring database {0} on server pool {1} using model {2}" // *Cause: An attempt to start monitoring the specified database on the specified server pool using the specified model failed. // *Action: Examine the accompanying error messages for details. / 3630, CHA_ALREADY_MONITORED_1, "Hosts are already being monitored using model {0}." // *Cause: A request to monitor hosts was rejected because the hosts were already being monitored using the specified model. // *Action: No action required. / 3631, CHA_ALREADY_MONITORED_2, "Hosts are already being monitored." // *Cause: A request to monitor hosts was rejected because the hosts were already being monitored. // *Action: No action required. / 3632, CHA_ALREADY_MONITORED_3, "Hosts in server pool {0} are already being monitored using model {1}." // *Cause: A request to monitor hosts in the specified server pool was rejected because the hosts were already being monitored using the specified model. // *Action: No action required. / 3633, CHA_ALREADY_MONITORED_4, "Hosts in server pool {0} are already being monitored." // *Cause: A request to monitor hosts in the specified server pool was rejected because the hosts were already being monitored. // *Action: No action required. / 3634, CHA_ALREADY_MONITORED_5, "Host {0} is already being monitored using model {1}." // *Cause: A request to monitor the specified host was rejected because the host was already being monitored using the specified model. // *Action: No action required. / 3635, CHA_ALREADY_MONITORED_6, "Host {0} is already being monitored." 
// *Cause: A request to monitor the specified host was rejected because the host was already being monitored. // *Action: No action required. / 3636, CHA_ALREADY_MONITORED_7, "Database {0} is already being monitored using model {1}." // *Cause: A request to monitor the specified database was rejected because the database was already being monitored using the specified model. // *Action: No action required. / 3637, CHA_ALREADY_MONITORED_8, "Database {0} is already being monitored." // *Cause: A request to monitor the specified database was rejected because the database was already being monitored. // *Action: No action required. / 3638, CHA_ALREADY_MONITORED_9, "Database {0} is already being monitored in server pool {1} using model {2}." // *Cause: A request to monitor the specified database in the specified server pool was rejected because the database was already being monitored using the specified model. // *Action: No action required. / 3639, CHA_ALREADY_MONITORED_10, "Database {0} is already being monitored in server pool {1}." // *Cause: A request to monitor the specified database in the specified server pool was rejected because the database was already being monitored. // *Action: No action required. / 3640, CHA_ALREADY_MONITORED_11, "Database {0} is already being monitored on host {1} using model {2}." // *Cause: A request to monitor the specified database on the specified node was rejected because the database was already being monitored using the specified model. // *Action: No action required. / 3641, CHA_ALREADY_MONITORED_12, "Database {0} is already being monitored on host {1}." // *Cause: A request to monitor the specified database on the specified node was rejected because the database was already being monitored. // *Action: No action required. / 3642, CHA_ALREADY_UNMONITORED_1, "Hosts are not being monitored." // *Cause: A request to stop monitoring hosts was rejected because the hosts were already not being monitored. 
// *Action: No action required. / 3643, CHA_ALREADY_UNMONITORED_2, "Hosts in server pool {0} are not being monitored." // *Cause: A request to stop monitoring hosts in the specified server pool was rejected because the hosts were already not being monitored. // *Action: No action required. / 3644, CHA_ALREADY_UNMONITORED_3, "Host {0} is not being monitored." // *Cause: A request to stop monitoring the specified host was rejected because the host was already not being monitored. // *Action: No action required. / 3645, CHA_ALREADY_UNMONITORED_4, "Database {0} is not being monitored." // *Cause: A request to stop monitoring the specified database was rejected because the database was already not being monitored. // *Action: No action required. / 3646, CHA_ALREADY_UNMONITORED_5, "Database {0} is not being monitored in server pool {1}." // *Cause: A request to stop monitoring the specified database in the specified server pool was rejected because the database was already not being monitored. // *Action: No action required. / 3647, CHA_ALREADY_UNMONITORED_6, "Database {0} is not being monitored on node {1}." // *Cause: A request to stop monitoring the specified database on the specified node was rejected because the database was already not being monitored. // *Action: No action required. / 3648, CHA_MONITOR_FAILED_BUILTIN_SERVERPOOL, "failed to start monitoring the target because server pool {0} is a built-in server pool" // *Cause: A request to start monitoring the target was rejected because the specified server pool was a built-in server pool. // *Action: Omit the server pool, or specify a server pool that is not built-in. / 3649, CHA_UNMONITOR_FAILED_BUILTIN_SERVERPOOL, "failed to stop monitoring the target because server pool {0} is a built-in server pool" // *Cause: A request to stop monitoring the target was rejected because the specified server pool was a built-in server pool. // *Action: Omit the server pool, or specify a server pool that is not built-in. 
/ 3650, CHA_STATUS_FAILED_BUILTIN_SERVERPOOL, "failed to retrieve the status of the target because server pool {0} is a built-in server pool" // *Cause: A request to retrieve the status of the target was rejected because the specified server pool was a built-in server pool. // *Action: Omit the server pool, or specify a server pool that is not built-in. / 3651, CHA_MONITOR_FAILED_EMPTY_MODEL_NAME, "invalid empty model name specified" // *Cause: A request to start monitoring the target was rejected because the specified model name was an empty string. // *Action: Reissue the command without a model name if the user wants the target to be monitored with the current model in use, or reissue the command with an existing model name that is different from the current model in use. / 3652, POLICY_MANAGED_CLUSTER, "The specified option combination, -node with -model, is not permitted when server pools are configured." // *Cause: An attempt to start monitoring the target was rejected because the specified option combination, -node with -model, is not permitted when server pools are configured. // *Action: Check the specified option combination. / 3653, ADMIN_MANAGED_CLUSTER, "The specified option, -serverpool, is not permitted when server pools are not configured." // *Cause: An attempt to start or stop monitoring the target was rejected because the specified option, -serverpool, is not permitted when server pools are not configured. // *Action: Check the specified option. / 3654, CALIBRATE_ERROR, "An internal error occurred in Cluster Health Advisor model calibration." // *Cause: An internal error occurred during Cluster Health Advisor model calibration. // *Action: Contact Oracle Support Services. / 3655, NO_DATABASE, "Database {0} does not exist." // *Cause: The specified database did not exist. // *Action: Specify a database that exists. 
/ 3656, INVALID_NODE, "invalid node name: {0}" // *Cause: An attempt to calibrate the model was rejected because an invalid node name was provided. // *Action: Specify a valid node name. / 3657, NO_DATABASE_ON_NODE, "Database {0} has no instance configured on node {1}." // *Cause: The specified database had no instance configured on the specified node. // *Action: Omit the node, or specify a node on which the database has a configured instance. / 3658, NO_SERVERPOOL, "Server pool {0} does not exist." // *Cause: An attempt to calibrate the model was rejected because the specified server pool did not exist. // *Action: Specify a server pool that exists. / 3659, INVALID_SYNTAX, "invalid command line syntax" // *Cause: The syntax of the command was incorrect. // *Action: Examine the usage provided for this command and use the correct syntax. / 3660, MODEL_ALREADY_EXISTS, "The model \"{0}\" already exists." // *Cause: An attempt to create the model was rejected because the model // already existed. // *Action: Specify a new model name or use the -force option to overwrite the // model. / 3661, MODEL_USED_FOR_MONITORING, "The model \"{0}\" is currently used for monitoring." // *Cause: An attempt to calibrate the model was rejected because the model is // currently used for monitoring. // *Action: Stop monitoring by issuing the 'chactl unmonitor' command before // calibrating. / 3662, MONITORING_HOST_MODEL, "monitoring nodes {0} using model {1}" // *Document: No // *Cause: Status message. // *Action: None. / 3663, MONITORING_DB_MODEL, "monitoring database {0} using model {1}" // *Document: No // *Cause: Status message. // *Action: None. / 3664, STATUS_ERROR, "Oracle Cluster Health Analysis Service encountered errors: {0}" // *Cause: An attempt to get the status of Oracle Cluster Health Analysis Service failed. // *Action: Examine the accompanying error messages for details. / 3665, CLUSTER_MONITOR, "The cluster is monitored." // *Document: No // *Cause: Status message. 
// *Action: None. / 3666, CLUSTER_NOT_MONITOR, "The cluster is not monitored." // *Document: No // *Cause: Status message. // *Action: None. / 3667, CLUSTER_HOST_MONITOR, "The cluster is monitored on hosts: {0}." // *Document: No // *Cause: Status message. // *Action: None. / 3668, CLUSTER_HOST_NOT_MONITOR, "The cluster is not monitored on hosts: {0}." // *Document: No // *Cause: Status message. // *Action: None. / 3669, CLUSTER_SERVER_POOL_MONITOR, "The cluster is monitored on server pools: {0}." // *Document: No // *Cause: Status message. // *Action: None. / 3670, CLUSTER_SERVER_POOL_NOT_MONITOR, "The cluster is not monitored on server pools: {0}." // *Document: No // *Cause: Status message. // *Action: None. / 3671, CLUSTER_DB_MONITOR, "The database {0} is monitored." // *Document: No // *Cause: Status message. // *Action: None. / 3672, CLUSTER_DB_NOT_MONITOR, "The database {0} is not monitored." // *Document: No // *Cause: Status message. // *Action: None. / 3673, CLUSTER_DB_HOST_MONITOR, "Database {0} is monitored on hosts: {1}." // *Document: No // *Cause: Status message. // *Action: None. / 3674, CLUSTER_DB_HOST_NOT_MONITOR, "Database {0} is not monitored on hosts: {1}." // *Document: No // *Cause: Status message. // *Action: None. / 3675, DB_SRVPOOL_MONITOR, "Database {0} is monitored on server pools: {1}." // *Document: No // *Cause: Status message. // *Action: None. / 3676, DB_SRVPOOL_NOT_MONITOR, "Database {0} is not monitored on server pools: {1}." // *Document: No // *Cause: Status message. // *Action: None. / 3677, HOST_HAS_MODEL, "Host {0} has the model {1} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3678, HOST_USE_MODEL, "Host {0} is using the model {1} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. 
/ 3679, CLUSTER_SRVPOOL_USE_MODEL, "Cluster server pool {0} is using the model {1} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3680, CLUSTER_SRVPOOL_HAS_MODEL, "Cluster server pool {0} has the model {1} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3681, DB_HOST_USE_MODEL, "Database {0} on host {1} is using the model {2} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3682, DB_HOST_HAS_MODEL, "Database {0} on host {1} has model {2} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3683, DB_SRVPOOL_USE_MODEL, "Database {0} on server pool {1} is using model {2} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3684, DB_SRVPOOL_HAS_MODEL, "Database {0} on server pool {1} has model {2} with the following attributes:" // *Document: No // *Cause: Status message. // *Action: None. / 3685, CONFIG_BAD_OPTION, "Option '-node' cannot be used with option '-db' or '-serverpool'." // *Cause: A 'chactl config' command specified conflicting options. // *Action: Reissue the command with the correct options. / 3686, CONFIG_DB_SRVPOOL_FAIL, "failed to retrieve server pool {0} configuration for the database {1}" // *Cause: An attempt to retrieve Oracle Cluster Health Analysis Service configuration for the specified database on the server pool failed. // *Action: Examine the accompanying error messages for details. / 3687, CONFIG_DB_FAIL, "failed to retrieve Oracle Cluster Health Analysis Service configuration for the specified database {0}" // *Cause: An attempt to retrieve Oracle Cluster Health Analysis Service configuration for the specified database failed. // *Action: Examine the accompanying error messages for details. 
/ 3688, CONFIG_SRVPOOL_FAIL, "failed to retrieve Oracle Cluster Health Analysis Service configuration for the specified server pool {0}" // *Cause: An attempt to retrieve Oracle Cluster Health Analysis Service configuration details for the specified server pool failed. // *Action: Examine the accompanying error messages for details. / 3689, CONFIG_CLUSTER_FAIL, "failed to retrieve the cluster configuration for Oracle Cluster Health Analysis Service" // *Cause: An attempt to retrieve the cluster configuration for Oracle Cluster Health Analysis Service failed. // *Action: Examine the accompanying error messages for details. / 3690, CONFIG_SQL_FAIL, "A SQL error occurred while retrieving information from Grid Infrastructure Management Repository." // *Cause: An SQL error occurred while connecting to or reading from Grid Infrastructure Management Repository. // *Action: Examine the accompanying error messages for details. / 3691, MONITORING_HOST, "monitoring nodes {0}" // *Document: No // *Cause: Status message. // *Action: None. / 3692, MONITORING_DB, "monitoring databases {0}" // *Document: No // *Cause: Status message. // *Action: None. / 3693, NOT_MONITORING_HOST, "not monitoring nodes" // *Document: No // *Cause: Status message. // *Action: None. / 3694, NOT_MONITORING_DB, "not monitoring database {0}" // *Document: No // *Cause: Status message. // *Action: None. / 3695, NOT_MONITORING_DBS, "not monitoring databases" // *Document: No // *Cause: Status message. // *Action: None. / 3696, MONITORING_DB_INST_MODEL, "monitoring database {0}, instances {1} using model {2}" // *Document: No // *Cause: Status message. // *Action: None. / 3697, MONITORING_INST_NODE, "monitoring instance {0} on node {1}" // *Document: No // *Cause: Status message. // *Action: None. / 3698, MONITORING_DATABASE, "Databases monitored: {0}" // *Document: No // *Cause: Config message // *Action: None. / 3699, MONITORING_NO_DATABASE, "No databases are currently enabled for monitoring."
// *Document: No // *Cause: Config message // *Action: None. / 3700, MONITORING_ENABLED, "Monitor: Enabled" // *Document: No // *Cause: Config message // *Action: None. / 3701, MONITORING_DISABLED, "Monitor: Disabled" // *Document: No // *Cause: Config message // *Action: None. / 3702, MONITORING_MODEL, "Model: {0}" // *Document: No // *Cause: Config message // *Action: None. / 3703, MONITORING_MODELS, "Models: {0}" // *Document: No // *Cause: Query message // *Action: None. / 3704, NO_MODEL_FOUND, "no model found" // *Cause: An attempt to query models failed because no models existed in the database. This is an internal error. // *Action: Contact Oracle Support Services. / 3705, NO_INCIDENT, "No abnormal incidents were found." // *Document: No // *Cause: Status message. // *Action: None. / 3706, DETECTED, "detected" // *Document: No // *Cause: Status message. // *Action: None. / 3707, CLEARED, "cleared" // *Document: No // *Cause: Status message. // *Action: None. / 3708, BASE_MODEL, "invalid attempt to remove or rename a base model" // *Cause: An attempt to remove or rename a base model was rejected. // *Action: Reconsider the need for the request, or specify a model that is not a base model. / 3709, DUMPED_STATS, "successfully dumped the CHA statistics to location \"{0}\"" // *Cause: Status message. // *Action: None. / 3710, START_END_DATE, "conflicting start and end times for dump" // *Cause: The specified end date and time for a CHA dump was not later than the start date and time. // *Action: Specify a dump end date and time later than the start date and time. / 3711, NO_DATA_EXIST, "No data exists between start time {0} and end time {1}." // *Cause: A query calibration command specified a time range in which no data existed in the database. // *Action: Specify a time range with existing data. / 3712, NO_DATASET_EXIST, "'dataset' {0} not found" // *Cause: A query calibration command specified an unknown 'dataset' name. 
// *Action: Specify a known 'dataset' name. / 3713, INVALID_SYNTAX_PART, "invalid command line syntax at '{0}'" // *Cause: The syntax of the command was incorrect. // *Action: Examine the usage provided for this command and retry using correct syntax. / 3714, START_END_TIME, "Start time {0} is not earlier than end time {1}." // *Cause: Specified end time was earlier than the start time. // *Action: Retry, specifying an end time that is later than the start time. / 3715, NO_DATA_EXIST_TABLE, "No monitored data exists in the database." // *Cause: A query calibration command was executed when there was no monitored data in the database. // *Action: Monitor the target for some time and then retry the command. / 3716, TIME_LESS_THAN_DEFAULT, "Specified time is smaller than the allowed minimum {0} hours." // *Cause: An attempt to modify the retention time for targets failed because // the specified time was smaller than the allowed minimum // retention time. // *Action: Retry, specifying a retention time that is greater than or equal // to the minimum retention time. / 3717, SET_RETENTION_SUCCESSFUL, "max retention successfully set to {0} hours" // *Cause: Status message. // *Action: None. / 3718, RESIZE_REPOSITORY_SUCCESSFUL, "repository successfully resized for {0} targets" // *Cause: Status message. // *Action: None. / 3719, WRONG_KPI_CLUSTER, "The specified KPI {0} is not supported for clusters." // *Cause: An attempt to run the command 'chactl query calibration' or // 'chactl calibrate' was rejected because the specified // Key Performance Indicator (KPI) was not supported for clusters. // *Action: Consult the product documentation for a list of supported KPIs. // Retry the command using a supported KPI. / 3720, WRONG_KPI_DB, "The specified KPI {0} is not supported for databases." 
// *Cause: An attempt to run the command 'chactl query calibration' or // 'chactl calibrate' was rejected because the specified // Key Performance Indicator (KPI) was not supported for databases. // *Action: Consult the product documentation for a list of supported KPIs. // Retry the command using a supported KPI. / 3721, INVALID_FILE_PATH, "Path to specified file is invalid." // *Cause: An attempt to execute the 'chactl' command was rejected because // the path specified in the command was invalid. // *Action: Retry, specifying a valid path. / 3722, FILE_ALREADY_EXISTS, "Specified file already exists." // *Cause: An attempt to execute the 'chactl' command was rejected because // the file already existed. // *Action: Retry the command specifying a path name that does not correspond // to an existing file. / 3723, FILE_EXCEPTION, "Could not write to specified file." // *Cause: An attempt to execute the 'chactl' command was rejected because // an I/O error occurred. The accompanying messages provide detailed // failure information. // *Action: Examine the accompanying error messages for details and retry // after fixing the issues reported. / 3724, OS_MISMATCH, "The specified model was calibrated on a different OS." // *Cause: An attempt to import the specified model was rejected because it // was calibrated on a different OS. // *Action: Calibrate the model on the same OS as is running in the cluster to // which the model will be imported, and then retry the import // operation. / 3725, UNSUPPORTED_DB_VERSION, "The database version is not supported for monitoring." // *Cause: An attempt to start monitoring a database was rejected because // the database version was not supported for monitoring. // *Action: Consult the product documentation for supported database versions. / 3726, RESIZE_EXCEEDS_MAX_FILE_SIZE, "Unable to resize the repository for the specified number of entities. An additional {0} GB of free space is required." 
// *Cause: An attempt to execute the command 'chactl resize repository' was // rejected because there was not sufficient space in the diskgroup // hosting the Grid Infrastructure Management Repository (GIMR). // *Action: Retry the command after adding storage to disk group hosting the // GIMR. / 3727, RESIZE_SHRINK_CONTAINS_DATA, "Unable to shrink the repository because the datafile contains data beyond the requested resize value." // *Cause: An attempt to run the command 'chactl resize repository' was // rejected because the datafile contained data beyond the // requested resize value. // *Action: Retry the command with -force option. All the monitoring data // will be deleted. / 3728, INSUFFICIENT_DATA_SAMPLES, "The number of data samples may not be sufficient for calibration." // *Cause: An attempt to run 'chactl query calibration' detected that the // number of samples in the specified time intervals may not be // sufficient for calibration. // *Action: Retry the command specifying longer time intervals. / 3729, INSUFFICIENT_CALIB_DATA, "The number of data samples {0} is below the required number of data samples {1}." // *Cause: An attempt to calibrate the model was rejected because the number // of samples in the specified time intervals was not sufficient for // calibration. // *Action: Retry the command specifying longer time intervals. / 3730, MAX_MODEL, "The maximum number of models {0} is reached." // *Cause: An attempt to calibrate the model was rejected because the maximum // number of models was reached. // *Action: Remove a model using the command 'chactl remove model' and retry // the calibration operation. / 3731, ALREADY_CALIBRATING, "Another calibration session is running." // *Cause: An attempt to calibrate the model was rejected because there was // another calibration session already running. // *Action: Retry the command after the other calibration has completed. / 3732, FORMAT_NOT_SUPPORTED, "Specified file format is not supported." 
// *Cause: An attempt to export the repository was rejected because the // specified file format was not supported. // *Action: The list of supported file formats can be found by running the // command 'chactl export -help'. Retry the command specifying a // supported file format. / 3733, EXPDP_FILE_EXISTS, "export file already exists at location {0}" // *Cause: An attempt to export the repository was rejected because an export // file already existed at the specified location. // *Action: Retry the command after deleting all the export files from the // directory. / 3734, EXPDP_ALREADY_RUNNING, "Another export session is running." // *Cause: An attempt to export the repository was rejected because there was // another export session already running. // *Action: Retry the command after the other export has completed. / 3735, EXPDP_STATS, "successfully exported the CHA schema to location \"{0}\"" // *Cause: Status message. // *Action: None. / 3736, TIME_MORE_THAN_MAX, "Specified time is larger than the allowed maximum {0} hours." // *Cause: An attempt to modify the retention time for targets was rejected // because the specified time was larger than the indicated // maximum allowed retention time. // *Action: Retry, specifying a retention time that is less than or equal to // the maximum retention time. / 3737, WARNING_MAX_RETENTION, "Warning: Based on the available table space and the number of entities monitored, the available retention time is limited to {0} hrs." // *Cause: A status inquiry caused a recalculation of the available // retention time based on the number of monitored entities and the // available table space that resulted in the indicated time, which // is less than the initially specified retention time. 
// *Action: To have the available retention time conform to the desired // retention time, increase the tablespace size using 'chactl resize // repository' or reduce the number of monitored entities or revise // the goal for retention time using 'chactl set maxretention'. / 3738, ENTITY_LESS_THAN_MIN, "Specified number of entities to be monitored is less than the allowed minimum {0}." // *Cause: An attempt to resize the repository failed because the specified // number of entities to be monitored was not at least the allowed // minimum number. // *Action: Retry, specifying a number of entities to be monitored that is // greater than or equal to the minimum number of entities. / 3739, ENTITY_MORE_THAN_MAX, "Specified number of entities to be monitored is greater than the allowed maximum {0}." // *Cause: An attempt to resize the repository failed because the specified // number of entities to be monitored was not less than the // allowed maximum number of entities. // *Action: Retry, specifying a number of entities to be monitored that is // less than or equal to the maximum number of entities. / 3740, SPACE_AVAILABLE, "Required space {0}GB is available." // *Cause: Status message. // *Action: None. / 3741, SPACE_NOT_AVAILABLE, "Required space {0}GB is not available. An additional {0}GB of free space would be required." // *Cause: Status message. // *Action: None. / 3743, MONITOR_MIN_RETENTION, "not enough space in the repository to retain {0} hours of data" // *Cause: An attempt to monitor the target was rejected because there was not // enough space in the repository to monitor the target with the // minimum retention time. // *Action: Increase the repository size by using the command 'chactl resize // repository', or stop monitoring other targets, and then retry the // operation. / 3744, LOW_RETENTION, "Warning: The current retention time is below the max retention time." 
// *Cause: The current retention time is below the specified maximum retention // time because there is not enough space in the repository. // *Action: To increase the retention time, increase the repository size using // the command 'chactl resize repository', or stop monitoring some // targets. /